libbpf: better fix for handling nulled-out struct_ops program
tools/lib/bpf/libbpf.c
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "zip.h"

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
static int map_set_def_max_entries(struct bpf_map *map);

static const char * const attach_type_name[] = {
	[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
	[BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
	[BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
	[BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
	[BPF_CGROUP_DEVICE] = "cgroup_device",
	[BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
	[BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
	[BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
	[BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
	[BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
	[BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
	[BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
	[BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
	[BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
	[BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
	[BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
	[BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
	[BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
	[BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
	[BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
	[BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
	[BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
	[BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
	[BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
	[BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
	[BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
	[BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
	[BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
	[BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
	[BPF_LIRC_MODE2] = "lirc_mode2",
	[BPF_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_TRACE_RAW_TP] = "trace_raw_tp",
	[BPF_TRACE_FENTRY] = "trace_fentry",
	[BPF_TRACE_FEXIT] = "trace_fexit",
	[BPF_MODIFY_RETURN] = "modify_return",
	[BPF_LSM_MAC] = "lsm_mac",
	[BPF_LSM_CGROUP] = "lsm_cgroup",
	[BPF_SK_LOOKUP] = "sk_lookup",
	[BPF_TRACE_ITER] = "trace_iter",
	[BPF_XDP_DEVMAP] = "xdp_devmap",
	[BPF_XDP_CPUMAP] = "xdp_cpumap",
	[BPF_XDP] = "xdp",
	[BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
	[BPF_PERF_EVENT] = "perf_event",
	[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
	[BPF_STRUCT_OPS] = "struct_ops",
	[BPF_NETFILTER] = "netfilter",
	[BPF_TCX_INGRESS] = "tcx_ingress",
	[BPF_TCX_EGRESS] = "tcx_egress",
	[BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
	[BPF_NETKIT_PRIMARY] = "netkit_primary",
	[BPF_NETKIT_PEER] = "netkit_peer",
	[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
};

static const char * const link_type_name[] = {
	[BPF_LINK_TYPE_UNSPEC] = "unspec",
	[BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_LINK_TYPE_TRACING] = "tracing",
	[BPF_LINK_TYPE_CGROUP] = "cgroup",
	[BPF_LINK_TYPE_ITER] = "iter",
	[BPF_LINK_TYPE_NETNS] = "netns",
	[BPF_LINK_TYPE_XDP] = "xdp",
	[BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
	[BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
	[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_LINK_TYPE_NETFILTER] = "netfilter",
	[BPF_LINK_TYPE_TCX] = "tcx",
	[BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
	[BPF_LINK_TYPE_NETKIT] = "netkit",
	[BPF_LINK_TYPE_SOCKMAP] = "sockmap",
};

static const char * const map_type_name[] = {
	[BPF_MAP_TYPE_UNSPEC] = "unspec",
	[BPF_MAP_TYPE_HASH] = "hash",
	[BPF_MAP_TYPE_ARRAY] = "array",
	[BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
	[BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
	[BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
	[BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
	[BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
	[BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
	[BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
	[BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
	[BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
	[BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
	[BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
	[BPF_MAP_TYPE_DEVMAP] = "devmap",
	[BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
	[BPF_MAP_TYPE_SOCKMAP] = "sockmap",
	[BPF_MAP_TYPE_CPUMAP] = "cpumap",
	[BPF_MAP_TYPE_XSKMAP] = "xskmap",
	[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
	[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
	[BPF_MAP_TYPE_QUEUE] = "queue",
	[BPF_MAP_TYPE_STACK] = "stack",
	[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
	[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
	[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
	[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
	[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
	[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
	[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
	[BPF_MAP_TYPE_ARENA] = "arena",
};

static const char * const prog_type_name[] = {
	[BPF_PROG_TYPE_UNSPEC] = "unspec",
	[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
	[BPF_PROG_TYPE_KPROBE] = "kprobe",
	[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
	[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
	[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
	[BPF_PROG_TYPE_XDP] = "xdp",
	[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
	[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
	[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
	[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
	[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
	[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
	[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
	[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
	[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
	[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
	[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
	[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
	[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
	[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
	[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
	[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
	[BPF_PROG_TYPE_TRACING] = "tracing",
	[BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
	[BPF_PROG_TYPE_EXT] = "ext",
	[BPF_PROG_TYPE_LSM] = "lsm",
	[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
	[BPF_PROG_TYPE_SYSCALL] = "syscall",
	[BPF_PROG_TYPE_NETFILTER] = "netfilter",
};

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn;

	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);

	return old_print_fn;
}

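/* Usage sketch (editor's addition, not part of upstream libbpf): route
 * libbpf logs through a custom callback and restore the previous one later.
 * my_print() is a hypothetical user-defined function matching
 * libbpf_print_fn_t:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print);
 *	...
 *	libbpf_set_print(old_fn);	// restore previous printer
 */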
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;
	int old_errno;
	libbpf_print_fn_t print_fn;

	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
	if (!print_fn)
		return;

	old_errno = errno;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);

	errno = old_errno;
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}

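/* Editor's note: the _S()/__S() pair above is the standard two-level
 * stringification idiom - the extra macro level forces LIBBPF_MAJOR_VERSION
 * and LIBBPF_MINOR_VERSION to be expanded before '#' stringifies them.
 * With hypothetical values 1 and 4 the function returns "v1.4", whereas a
 * single-level #LIBBPF_MAJOR_VERSION would produce the literal string
 * "LIBBPF_MAJOR_VERSION".
 */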
enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_LD64,
	RELO_EXTERN_CALL,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
			int ext_idx;
		};
	};
};

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and few other) BPF programs
	 * that were attachable through BPF_PROG_ATTACH command. Pretty
	 * meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program supports non-linear XDP buffer */
	SEC_XDP_FRAGS = 16,
	/* Setup proper attach type for usdt probes. */
	SEC_USDT = 32,
};

struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	char *name;
	char *sec_name;
	size_t sec_idx;
	const struct bpf_sec_def *sec_def;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of main BPF
	 * programs are processed and relocated and is used to determine
	 * whether sub-program was already appended to the main program, and
	 * if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct bpf_object *obj;

	int fd;
	bool autoload;
	bool autoattach;
	bool sym_global;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int exception_cb_idx;

	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;

	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
#define ARENA_SEC ".addr_space.1"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	int mod_btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	__u64 map_extra;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
	int fd_array_idx;
};

enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
	SEC_ST_OPS,
};

struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	Elf_Data *arena_data;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	size_t sec_cnt;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	bool has_st_ops;
	int arena_data_shndx;
};

struct usdt_manager;

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	struct usdt_manager *usdt_man;

	struct bpf_map *arena_map;
	void *arena_data;
	size_t arena_data_sz;

	struct kern_feature_cache *feat_cache;
	char *token_path;
	int token_fd;

	char path[];
};

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);

void bpf_program__unload(struct bpf_program *prog)
{
	if (!prog)
		return;

	zclose(prog->fd);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

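/* Editor's illustration of the instruction flavors the helpers above
 * distinguish. A BPF-to-BPF subprog call is encoded roughly as:
 *
 *	struct bpf_insn insn = {
 *		.code = BPF_JMP | BPF_CALL,	// call instruction
 *		.src_reg = BPF_PSEUDO_CALL,	// callee is another BPF func
 *		.imm = <insn offset to callee>,
 *	};
 *
 * while a kernel helper call has src_reg == 0 (imm is the helper ID), and
 * a callback/subprog address load is an ldimm64 with BPF_PSEUDO_FUNC.
 */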
static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->fd = -1;
	prog->exception_cb_idx = -1;

	/* libbpf's convention for SEC("?abc...") is that it's just like
	 * SEC("abc...") but the corresponding bpf_program starts out with
	 * autoload set to false.
	 */
	if (sec_name[0] == '?') {
		prog->autoload = false;
		/* from now on forget there was ? in section name */
		sec_name++;
	} else {
		prog->autoload = true;
	}

	prog->autoattach = true;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

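/* Example (editor's addition, illustrative BPF-side code) of the '?'
 * autoload convention handled in bpf_object__init_prog() above:
 *
 *	SEC("?kprobe/do_unlinkat")	// prog->autoload starts out false
 *	int maybe_used(void *ctx) { return 0; }
 *
 * Userspace can later opt in with bpf_program__set_autoload(prog, true)
 * before bpf_object__load().
 */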
static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so no special treatment is
			 * needed in bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
			prog->sym_global = true;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}

static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
			    __u16 kind, struct btf **res_btf,
			    struct module_btf **res_mod_btf);

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
			   struct module_btf **mod_btf,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	struct btf *btf;
	__s32 kern_vtype_id, kern_type_id;
	char tname[256];
	__u32 i;

	snprintf(tname, sizeof(tname), "%.*s",
		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);

	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
					&btf, mod_btf);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

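/* Editor's note: the lookup above relies on the kernel naming convention
 * where a struct_ops type has a companion value type prefixed with
 * "bpf_struct_ops_", e.g. (sketch of the vmlinux BTF layout):
 *
 *	struct tcp_congestion_ops { ... };		// kern_type
 *	struct bpf_struct_ops_tcp_congestion_ops {	// kern_vtype
 *		...				// kernel-internal fields
 *		struct tcp_congestion_ops data;	// found via data_member
 *	};
 *
 * bpf_core_essential_name_len() strips a local "___flavor" suffix first,
 * so e.g. "tcp_congestion_ops___v2" still matches "tcp_congestion_ops".
 */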
static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

static bool is_valid_st_ops_program(struct bpf_object *obj,
				    const struct bpf_program *prog)
{
	int i;

	for (i = 0; i < obj->nr_programs; i++) {
		if (&obj->programs[i] == prog)
			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
	}

	return false;
}

/* For each struct_ops program P, referenced from some struct_ops map M,
 * enable P.autoload if there are Ms for which M.autocreate is true,
 * disable P.autoload if for all Ms M.autocreate is false.
 * Don't change P.autoload for programs that are not referenced from any maps.
 */
static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
{
	struct bpf_program *prog, *slot_prog;
	struct bpf_map *map;
	int i, j, k, vlen;

	for (i = 0; i < obj->nr_programs; ++i) {
		int should_load = false;
		int use_cnt = 0;

		prog = &obj->programs[i];
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
			continue;

		for (j = 0; j < obj->nr_maps; ++j) {
			map = &obj->maps[j];
			if (!bpf_map__is_struct_ops(map))
				continue;

			vlen = btf_vlen(map->st_ops->type);
			for (k = 0; k < vlen; ++k) {
				slot_prog = map->st_ops->progs[k];
				if (prog != slot_prog)
					continue;

				use_cnt++;
				if (map->autocreate)
					should_load = true;
			}
		}
		if (use_cnt)
			prog->autoload = should_load;
	}

	return 0;
}

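/* Worked example for the rule above (editor's addition): if prog P sits in
 * slots of maps M1 (autocreate == true) and M2 (autocreate == false), then
 * use_cnt == 2 and should_load == true, so P.autoload becomes true. If both
 * maps have autocreate == false, P.autoload becomes false. A struct_ops
 * prog referenced by no map keeps its user-set autoload value.
 */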
/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_object *obj = map->obj;
	const struct btf *btf = obj->btf;
	struct bpf_struct_ops *st_ops;
	const struct btf *kern_btf;
	struct module_btf *mod_btf;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		struct bpf_program *prog;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		moff = member->offset / 8;
		mdata = data + moff;
		msize = btf__resolve_size(btf, member->type);
		if (msize < 0) {
			pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
				map->name, mname);
			return msize;
		}

		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			if (!libbpf_is_mem_zeroed(mdata, msize)) {
				pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog = st_ops->progs[i];
			if (prog) {
				/* If we had declaratively set struct_ops callback, we need to
				 * first validate that it's actually a struct_ops program.
				 * And then force its autoload to false, because it doesn't have
				 * a chance of succeeding from POV of the current struct_ops map.
				 * If this program is still referenced somewhere else, though,
				 * then bpf_object_adjust_struct_ops_autoload() will update its
				 * autoload accordingly.
				 */
				if (!is_valid_st_ops_program(obj, prog)) {
					pr_warn("struct_ops init_kern %s: member %s is declaratively assigned a non-struct_ops program\n",
						map->name, mname);
					return -EINVAL;
				}
				prog->autoload = false;
				st_ops->progs[i] = NULL;
			}

			/* Skip all-zero/NULL fields if they are not present in the kernel BTF */
			pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
				map->name, mname);
			continue;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_moff = kern_member->offset / 8;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			/* Update the value from the shadow type */
			prog = *(void **)mdata;
			st_ops->progs[i] = prog;
			if (!prog)
				continue;
			if (!is_valid_st_ops_program(obj, prog)) {
				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
					map->name, mname);
				return -ENOTSUP;
			}

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			if (mod_btf)
				prog->attach_btf_obj_fd = mod_btf->fd;

			/* if we haven't yet processed this BPF program, record proper
			 * attach_btf_id and member_idx
			 */
			if (!prog->attach_btf_id) {
				prog->attach_btf_id = kern_type_id;
				prog->expected_attach_type = kern_member_idx;
			}

			/* struct_ops BPF prog can be re-used between multiple
			 * .struct_ops & .struct_ops.link as long as it's the
			 * same struct_ops struct definition and the same
			 * function pointer field
			 */
			if (prog->attach_btf_id != kern_type_id) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->attach_btf_id, kern_type_id);
				return -EINVAL;
			}
			if (prog->expected_attach_type != kern_member_idx) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->expected_attach_type, kern_member_idx);
				return -EINVAL;
			}

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

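/* Editor's note on the nulled-out program handling above: with struct_ops
 * shadow types, userspace may clear a callback slot before load, e.g.
 * (illustrative skeleton code, names hypothetical):
 *
 *	skel->struct_ops.my_ops->some_cb = NULL;
 *
 * bpf_map__init_kern_struct_ops() then records a NULL slot and continues
 * (or skips the member entirely if the kernel doesn't know it and the
 * data is all-zero), instead of failing the whole map.
 */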
static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		if (!map->autocreate)
			continue;

		err = bpf_map__init_kern_struct_ops(map);
		if (err)
			return err;
	}

	return 0;
}

static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
				int shndx, Elf_Data *data)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, sec_name,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			sec_name);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, sec_name);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;
		map->btf_value_type_id = type_id;

		/* Follow same convention as for programs autoload:
		 * SEC("?.struct_ops") means map is not created by default.
		 */
		if (sec_name[0] == '?') {
			map->autocreate = false;
			/* from now on forget there was ? in section name */
			sec_name++;
		}

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;
		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, sec_name);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

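/* Example (editor's addition, illustrative BPF-side code) of a declarative
 * struct_ops map that init_struct_ops_maps() above turns into a
 * BPF_MAP_TYPE_STRUCT_OPS map (names hypothetical):
 *
 *	SEC(".struct_ops.link")
 *	struct tcp_congestion_ops my_cong = {
 *		.ssthresh	= (void *)my_ssthresh_prog,
 *		.name		= "my_cong",
 *	};
 *
 * SEC("?.struct_ops...") variants additionally start with autocreate false.
 */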
static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
	const char *sec_name;
	int sec_idx, err;

	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];

		if (desc->sec_type != SEC_ST_OPS)
			continue;

		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
		if (!sec_name)
			return -LIBBPF_ERRNO__FORMAT;

		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
		if (err)
			return err;
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.btf_maps_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.arena_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by bpf_object__open_mem(). */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	if (!data) {
		pr_warn("invalid license section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (!data || size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0; /* found it */
	}

	return -ENOENT;
}

static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return ERR_PTR(-EIO);
		}
		if (strcmp(name, sname) == 0)
			return sym;
	}

	return ERR_PTR(-ENOENT);
}

/* Some versions of Android don't provide memfd_create() in their libc
 * implementation, so avoid complications and just go straight to Linux
 * syscall.
 */
static int sys_memfd_create(const char *name, unsigned flags)
{
	return syscall(__NR_memfd_create, name, flags);
}

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif

static int create_placeholder_fd(void)
{
	int fd;

	fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
	if (fd < 0)
		return -errno;
	return fd;
}

bf829271 1699static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
d859900c 1700{
69721203
AN
1701 struct bpf_map *map;
1702 int err;
bf829271 1703
69721203
AN
1704 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1705 sizeof(*obj->maps), obj->nr_maps + 1);
1706 if (err)
1707 return ERR_PTR(err);
bf829271 1708
69721203 1709 map = &obj->maps[obj->nr_maps++];
ec41817b 1710 map->obj = obj;
dac645b9
AN
1711 /* Preallocate map FD without actually creating BPF map just yet.
1712 * These map FD "placeholders" will be reused later without changing
1713 * FD value when map is actually created in the kernel.
1714 *
1715 * This is useful to be able to perform BPF program relocations
1716 * without having to create BPF maps before that step. This allows us
1717 * to finalize and load BTF very late in BPF object's loading phase,
1718 * right before BPF maps have to be created and BPF programs have to
1719 * be loaded. By having these map FD placeholders we can perform all
1720 * the sanitizations, relocations, and any other adjustments before we
1721 * start creating actual BPF kernel objects (BTF, maps, progs).
1722 */
1723 map->fd = create_placeholder_fd();
1724 if (map->fd < 0)
1725 return ERR_PTR(map->fd);
69721203 1726 map->inner_map_fd = -1;
ec41817b 1727 map->autocreate = true;
bf829271 1728
69721203 1729 return map;
d859900c
DB
1730}
1731
79ff13e9 1732static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
eba9c5f4 1733{
9d0a2331 1734 const long page_sz = sysconf(_SC_PAGE_SIZE);
eba9c5f4
AN
1735 size_t map_sz;
1736
9d0a2331 1737 map_sz = (size_t)roundup(value_sz, 8) * max_entries;
eba9c5f4
AN
1738 map_sz = roundup(map_sz, page_sz);
1739 return map_sz;
1740}
1741
79ff13e9
AS
1742static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1743{
1744 const long page_sz = sysconf(_SC_PAGE_SIZE);
1745
1746 switch (map->def.type) {
1747 case BPF_MAP_TYPE_ARRAY:
1748 return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1749 case BPF_MAP_TYPE_ARENA:
1750 return page_sz * map->def.max_entries;
1751 default:
1752 return 0; /* not supported */
1753 }
1754}
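For concreteness, a worked instance of the sizing rules above, assuming a 4096-byte page size (values are illustrative):

	/* ARRAY map, value_size = 12, max_entries = 1000:
	 *   roundup(12, 8)       = 16 bytes per entry
	 *   16 * 1000            = 16000 bytes raw
	 *   roundup(16000, 4096) = 16384 bytes (4 pages) to mmap
	 * ARENA map, max_entries = 8: 8 * 4096 = 32768 bytes to mmap.
	 */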
1755
9d0a2331
JK
1756static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1757{
1758 void *mmaped;
1759
1760 if (!map->mmaped)
1761 return -EINVAL;
1762
1763 if (old_sz == new_sz)
1764 return 0;
1765
1766 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1767 if (mmaped == MAP_FAILED)
1768 return -errno;
1769
1770 memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1771 munmap(map->mmaped, old_sz);
1772 map->mmaped = mmaped;
1773 return 0;
1774}
1775
aed65917 1776static char *internal_map_name(struct bpf_object *obj, const char *real_name)
81bfdd08 1777{
113e6b7e 1778 char map_name[BPF_OBJ_NAME_LEN], *p;
aed65917
AN
1779 int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1780
1781 /* This is one of the more confusing parts of libbpf for various
1782 * reasons, some of which are historical. The original idea for naming
1783 * internal maps was to include as much of BPF object name prefix as
1784 * possible, so that it can be distinguished from similar internal
1785 * maps of a different BPF object.
1786 * As an example, let's say we have bpf_object named 'my_object_name'
1787 * and internal map corresponding to '.rodata' ELF section. The final
1788 * map name advertised to user and to the kernel will be
1789 * 'my_objec.rodata', taking first 8 characters of object name and
1790 * entire 7 characters of '.rodata'.
1791 * Somewhat confusingly, if internal map ELF section name is shorter
1792 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1793 * for the suffix, even though we only have 4 actual characters, and
1794 * resulting map will be called 'my_objec.bss', not even using all 15
1795 * characters allowed by the kernel. Oh well, at least the truncated
1796 * object name is somewhat consistent in this case. But if the map
1797 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1798 * (8 chars) and thus will be left with only first 7 characters of the
1799 * object name ('my_obje'). Happy guessing, user, that the final map
1800 * name will be "my_obje.kconfig".
1801 * Now, with libbpf starting to support arbitrarily named .rodata.*
1802 * and .data.* data sections, it's possible that ELF section name is
1803 * longer than allowed 15 chars, so we now need to be careful to take
1804 * only up to 15 first characters of ELF name, taking no BPF object
1805 * name characters at all. So '.rodata.abracadabra' will result in
1806 * '.rodata.abracad' kernel and user-visible name.
1807 * We need to keep this convoluted logic intact for .data, .bss and
1808 * .rodata maps, but for new custom .data.custom and .rodata.custom
1809 * maps we use their ELF names as is, not prepending bpf_object name
1810 * in front. We still need to truncate them to 15 characters for the
1811 * kernel. Full name can be recovered for such maps by using DATASEC
1812 * BTF type associated with such map's value type, though.
1813 */
1814 if (sfx_len >= BPF_OBJ_NAME_LEN)
1815 sfx_len = BPF_OBJ_NAME_LEN - 1;
1816
1817 /* if there are two or more dots in map name, it's a custom dot map */
1818 if (strchr(real_name + 1, '.') != NULL)
1819 pfx_len = 0;
1820 else
1821 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
81bfdd08
AN
1822
1823 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
aed65917 1824 sfx_len, real_name);
81bfdd08 1825
113e6b7e
THJ
1826 /* sanitise map name to characters allowed by kernel */
1827 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1828 if (!isalnum(*p) && *p != '_' && *p != '.')
1829 *p = '_';
1830
81bfdd08
AN
1831 return strdup(map_name);
1832}
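To make the naming rules spelled out in the comment above concrete, here are the names produced for a hypothetical bpf_object called 'my_object_name':

	/* ".rodata"             -> "my_objec.rodata" (8 object chars + 7-char suffix)
	 * ".bss"                -> "my_objec.bss"    (suffix still reserves 7 chars)
	 * ".kconfig"            -> "my_obje.kconfig" (8-char suffix leaves 7 object chars)
	 * ".rodata.abracadabra" -> ".rodata.abracad" (custom dot map: no prefix, 15 chars max)
	 */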
1833
262cfb74 1834static int
4fcac46c
AN
1835map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1836
1837/* Internal BPF map is mmap()'able only if at least one of corresponding
1838 * DATASEC's VARs is to be exposed through BPF skeleton. I.e., it's a GLOBAL
1839 * variable and it's not marked as __hidden (which turns it into, effectively,
1840 * a STATIC variable).
1841 */
1842static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1843{
1844 const struct btf_type *t, *vt;
1845 struct btf_var_secinfo *vsi;
1846 int i, n;
1847
1848 if (!map->btf_value_type_id)
1849 return false;
1850
1851 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1852 if (!btf_is_datasec(t))
1853 return false;
1854
1855 vsi = btf_var_secinfos(t);
1856 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1857 vt = btf__type_by_id(obj->btf, vsi->type);
1858 if (!btf_is_var(vt))
1859 continue;
1860
1861 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1862 return true;
1863 }
1864
1865 return false;
1866}
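A sketch of the BPF-side variable declarations this check distinguishes (variable names are illustrative; __hidden is the visibility macro from bpf_helpers.h):

	int exposed_cnt;           /* GLOBAL VAR: section map becomes BPF_F_MMAPABLE */
	static int private_cnt;    /* BTF_VAR_STATIC: ignored by the check above */
	__hidden int internal_cnt; /* hidden visibility: demoted to static in BTF */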
262cfb74 1867
d859900c 1868static int
bf829271 1869bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
aed65917 1870 const char *real_name, int sec_idx, void *data, size_t data_sz)
d859900c 1871{
bf829271
AN
1872 struct bpf_map_def *def;
1873 struct bpf_map *map;
9d0a2331 1874 size_t mmap_sz;
eba9c5f4 1875 int err;
bf829271
AN
1876
1877 map = bpf_object__add_map(obj);
1878 if (IS_ERR(map))
1879 return PTR_ERR(map);
d859900c
DB
1880
1881 map->libbpf_type = type;
db48814b
AN
1882 map->sec_idx = sec_idx;
1883 map->sec_offset = 0;
aed65917
AN
1884 map->real_name = strdup(real_name);
1885 map->name = internal_map_name(obj, real_name);
1886 if (!map->real_name || !map->name) {
1887 zfree(&map->real_name);
1888 zfree(&map->name);
d859900c
DB
1889 return -ENOMEM;
1890 }
1891
bf829271 1892 def = &map->def;
d859900c
DB
1893 def->type = BPF_MAP_TYPE_ARRAY;
1894 def->key_size = sizeof(int);
eba9c5f4 1895 def->value_size = data_sz;
d859900c 1896 def->max_entries = 1;
81bfdd08 1897 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
2e7ba4f8 1898 ? BPF_F_RDONLY_PROG : 0;
4fcac46c
AN
1899
1900 /* failures are fine because of maps like .rodata.str1.1 */
1901 (void) map_fill_btf_type_info(obj, map);
1902
1903 if (map_is_mmapable(obj, map))
1904 def->map_flags |= BPF_F_MMAPABLE;
7fe74b43
AN
1905
1906 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
81bfdd08 1907 map->name, map->sec_idx, map->sec_offset, def->map_flags);
7fe74b43 1908
79ff13e9 1909 mmap_sz = bpf_map_mmap_sz(map);
9d0a2331 1910 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
eba9c5f4
AN
1911 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1912 if (map->mmaped == MAP_FAILED) {
1913 err = -errno;
1914 map->mmaped = NULL;
1915 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1916 map->name, err);
aed65917 1917 zfree(&map->real_name);
eba9c5f4
AN
1918 zfree(&map->name);
1919 return err;
d859900c
DB
1920 }
1921
166750bc 1922 if (data)
eba9c5f4
AN
1923 memcpy(map->mmaped, data, data_sz);
1924
e1d1dc46 1925 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
d859900c
DB
1926 return 0;
1927}
1928
bf829271
AN
1929static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1930{
25bbbd7a 1931 struct elf_sec_desc *sec_desc;
aed65917 1932 const char *sec_name;
25bbbd7a 1933 int err = 0, sec_idx;
bf829271 1934
bf829271
AN
1935 /*
1936 * Populate obj->maps with libbpf internal maps.
1937 */
25bbbd7a
AN
1938 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1939 sec_desc = &obj->efile.secs[sec_idx];
1940
47ea7417 1941 /* Skip recognized sections with size 0. */
d4e6d684 1942 if (!sec_desc->data || sec_desc->data->d_size == 0)
47ea7417
JH
1943 continue;
1944
25bbbd7a
AN
1945 switch (sec_desc->sec_type) {
1946 case SEC_DATA:
aed65917 1947 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1948 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
aed65917 1949 sec_name, sec_idx,
25bbbd7a
AN
1950 sec_desc->data->d_buf,
1951 sec_desc->data->d_size);
1952 break;
1953 case SEC_RODATA:
1954 obj->has_rodata = true;
aed65917 1955 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1956 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
aed65917 1957 sec_name, sec_idx,
25bbbd7a
AN
1958 sec_desc->data->d_buf,
1959 sec_desc->data->d_size);
1960 break;
1961 case SEC_BSS:
aed65917 1962 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1963 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
aed65917 1964 sec_name, sec_idx,
25bbbd7a
AN
1965 NULL,
1966 sec_desc->data->d_size);
1967 break;
1968 default:
1969 /* skip */
1970 break;
1971 }
bf829271
AN
1972 if (err)
1973 return err;
1974 }
1975 return 0;
1976}
1977
166750bc
AN
1978
1979static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1980 const void *name)
1981{
1982 int i;
1983
1984 for (i = 0; i < obj->nr_extern; i++) {
1985 if (strcmp(obj->externs[i].name, name) == 0)
1986 return &obj->externs[i];
1987 }
1988 return NULL;
1989}
1990
c56e5977
YS
1991static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
1992 const void *name, int len)
1993{
1994 const char *ext_name;
1995 int i;
1996
1997 for (i = 0; i < obj->nr_extern; i++) {
1998 ext_name = obj->externs[i].name;
1999 if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
2000 return &obj->externs[i];
2001 }
2002 return NULL;
2003}
2004
2e33efe3
AN
2005static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
2006 char value)
166750bc 2007{
2e33efe3
AN
2008 switch (ext->kcfg.type) {
2009 case KCFG_BOOL:
166750bc 2010 if (value == 'm') {
55d00c37 2011 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
166750bc
AN
2012 ext->name, value);
2013 return -EINVAL;
2014 }
2015 *(bool *)ext_val = value == 'y' ? true : false;
2016 break;
2e33efe3 2017 case KCFG_TRISTATE:
166750bc
AN
2018 if (value == 'y')
2019 *(enum libbpf_tristate *)ext_val = TRI_YES;
2020 else if (value == 'm')
2021 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
2022 else /* value == 'n' */
2023 *(enum libbpf_tristate *)ext_val = TRI_NO;
2024 break;
2e33efe3 2025 case KCFG_CHAR:
166750bc
AN
2026 *(char *)ext_val = value;
2027 break;
2e33efe3
AN
2028 case KCFG_UNKNOWN:
2029 case KCFG_INT:
2030 case KCFG_CHAR_ARR:
166750bc 2031 default:
55d00c37 2032 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
166750bc
AN
2033 ext->name, value);
2034 return -EINVAL;
2035 }
2036 ext->is_set = true;
2037 return 0;
2038}
2039
2e33efe3
AN
2040static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
2041 const char *value)
166750bc
AN
2042{
2043 size_t len;
2044
2e33efe3 2045 if (ext->kcfg.type != KCFG_CHAR_ARR) {
55d00c37
AN
2046 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2047 ext->name, value);
166750bc
AN
2048 return -EINVAL;
2049 }
2050
2051 len = strlen(value);
2052 if (value[len - 1] != '"') {
2e33efe3 2053 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
166750bc
AN
2054 ext->name, value);
2055 return -EINVAL;
2056 }
2057
2058 /* strip quotes */
2059 len -= 2;
2e33efe3 2060 if (len >= ext->kcfg.sz) {
55d00c37 2061 pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
2e33efe3
AN
2062 ext->name, value, len, ext->kcfg.sz - 1);
2063 len = ext->kcfg.sz - 1;
166750bc
AN
2064 }
2065 memcpy(ext_val, value + 1, len);
2066 ext_val[len] = '\0';
2067 ext->is_set = true;
2068 return 0;
2069}
2070
2071static int parse_u64(const char *value, __u64 *res)
2072{
2073 char *value_end;
2074 int err;
2075
2076 errno = 0;
2077 *res = strtoull(value, &value_end, 0);
2078 if (errno) {
2079 err = -errno;
2080 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
2081 return err;
2082 }
2083 if (*value_end) {
2084 pr_warn("failed to parse '%s' as integer completely\n", value);
2085 return -EINVAL;
2086 }
2087 return 0;
2088}
2089
2e33efe3 2090static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
166750bc 2091{
2e33efe3 2092 int bit_sz = ext->kcfg.sz * 8;
166750bc 2093
2e33efe3 2094 if (ext->kcfg.sz == 8)
166750bc
AN
2095 return true;
2096
2097 /* Validate that the value stored in u64 fits in an integer of `ext->sz`
2098 * bytes without any loss of information. If the target integer
2099 * is signed, we rely on the following limits of integer type of
2100 * Y bits and subsequent transformation:
2101 *
2102 * -2^(Y-1) <= X <= 2^(Y-1) - 1
2103 * 0 <= X + 2^(Y-1) <= 2^Y - 1
2104 * 0 <= X + 2^(Y-1) < 2^Y
2105 *
2106 * For unsigned target integer, check that all the (64 - Y) bits are
2107 * zero.
2108 */
2e33efe3 2109 if (ext->kcfg.is_signed)
166750bc
AN
2110 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2111 else
2112 return (v >> bit_sz) == 0;
2113}
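A minimal, runnable re-derivation of the signed-range check above for bit_sz = 8, i.e. a 1-byte signed kcfg (standalone userspace sketch, not part of libbpf):

	#include <assert.h>
	#include <stdbool.h>

	static bool fits_s8(unsigned long long v)
	{
		/* same shift trick as is_kcfg_value_in_range() for 1-byte signed */
		return v + (1ULL << 7) < (1ULL << 8);
	}

	int main(void)
	{
		assert(fits_s8(127));                    /* INT8_MAX */
		assert(fits_s8(0xFFFFFFFFFFFFFF80ULL));  /* -128 stored as u64; sum wraps to 0 */
		assert(!fits_s8(128));                   /* overflows int8 */
		assert(!fits_s8(0xFFFFFFFFFFFFFF7FULL)); /* -129 stored as u64 */
		return 0;
	}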
2114
2e33efe3
AN
2115static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2116 __u64 value)
166750bc 2117{
55d00c37
AN
2118 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2119 ext->kcfg.type != KCFG_BOOL) {
2120 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
7745ff98 2121 ext->name, (unsigned long long)value);
166750bc
AN
2122 return -EINVAL;
2123 }
55d00c37
AN
2124 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2125 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2126 ext->name, (unsigned long long)value);
2127 return -EINVAL;
2128
2129 }
2e33efe3 2130 if (!is_kcfg_value_in_range(ext, value)) {
55d00c37 2131 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2e33efe3 2132 ext->name, (unsigned long long)value, ext->kcfg.sz);
166750bc
AN
2133 return -ERANGE;
2134 }
2e33efe3 2135 switch (ext->kcfg.sz) {
e3ba8e4e
KM
2136 case 1:
2137 *(__u8 *)ext_val = value;
2138 break;
2139 case 2:
2140 *(__u16 *)ext_val = value;
2141 break;
2142 case 4:
2143 *(__u32 *)ext_val = value;
2144 break;
2145 case 8:
2146 *(__u64 *)ext_val = value;
2147 break;
2148 default:
2149 return -EINVAL;
166750bc
AN
2150 }
2151 ext->is_set = true;
2152 return 0;
2153}
2154
8601fd42
AN
2155static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2156 char *buf, void *data)
166750bc 2157{
166750bc 2158 struct extern_desc *ext;
8601fd42 2159 char *sep, *value;
166750bc
AN
2160 int len, err = 0;
2161 void *ext_val;
2162 __u64 num;
166750bc 2163
13d35a0c 2164 if (!str_has_pfx(buf, "CONFIG_"))
8601fd42 2165 return 0;
166750bc 2166
8601fd42
AN
2167 sep = strchr(buf, '=');
2168 if (!sep) {
2169 pr_warn("failed to parse '%s': no separator\n", buf);
2170 return -EINVAL;
2171 }
2172
2173 /* Trim ending '\n' */
2174 len = strlen(buf);
2175 if (buf[len - 1] == '\n')
2176 buf[len - 1] = '\0';
2177 /* Split on '=' and ensure that a value is present. */
2178 *sep = '\0';
2179 if (!sep[1]) {
2180 *sep = '=';
2181 pr_warn("failed to parse '%s': no value\n", buf);
2182 return -EINVAL;
2183 }
2184
2185 ext = find_extern_by_name(obj, buf);
2186 if (!ext || ext->is_set)
2187 return 0;
2188
2e33efe3 2189 ext_val = data + ext->kcfg.data_off;
8601fd42
AN
2190 value = sep + 1;
2191
2192 switch (*value) {
2193 case 'y': case 'n': case 'm':
2e33efe3 2194 err = set_kcfg_value_tri(ext, ext_val, *value);
8601fd42
AN
2195 break;
2196 case '"':
2e33efe3 2197 err = set_kcfg_value_str(ext, ext_val, value);
8601fd42
AN
2198 break;
2199 default:
2200 /* assume integer */
2201 err = parse_u64(value, &num);
2202 if (err) {
55d00c37 2203 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
8601fd42
AN
2204 return err;
2205 }
55d00c37
AN
2206 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2207 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2208 return -EINVAL;
2209 }
2e33efe3 2210 err = set_kcfg_value_num(ext, ext_val, num);
8601fd42 2211 break;
166750bc 2212 }
8601fd42
AN
2213 if (err)
2214 return err;
55d00c37 2215 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
8601fd42
AN
2216 return 0;
2217}
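For reference, sample Kconfig lines each branch above handles, with the BPF-side externs they would initialize (CONFIG values are illustrative; __kconfig is the section attribute from bpf_helpers.h):

	/* CONFIG_BPF=y                     -> set_kcfg_value_tri()
	 * CONFIG_DEFAULT_HOSTNAME="(none)" -> set_kcfg_value_str()
	 * CONFIG_HZ=250                    -> set_kcfg_value_num()
	 */
	extern _Bool CONFIG_BPF __kconfig;
	extern char CONFIG_DEFAULT_HOSTNAME[8] __kconfig;
	extern unsigned int CONFIG_HZ __kconfig;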
2218
2219static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2220{
2221 char buf[PATH_MAX];
2222 struct utsname uts;
2223 int len, err = 0;
2224 gzFile file;
2225
2226 uname(&uts);
2227 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2228 if (len < 0)
2229 return -EINVAL;
2230 else if (len >= PATH_MAX)
2231 return -ENAMETOOLONG;
2232
2233 /* gzopen also accepts uncompressed files. */
8e50750f 2234 file = gzopen(buf, "re");
8601fd42 2235 if (!file)
8e50750f 2236 file = gzopen("/proc/config.gz", "re");
8601fd42 2237
166750bc 2238 if (!file) {
8601fd42 2239 pr_warn("failed to open system Kconfig\n");
166750bc
AN
2240 return -ENOENT;
2241 }
2242
2243 while (gzgets(file, buf, sizeof(buf))) {
8601fd42
AN
2244 err = bpf_object__process_kconfig_line(obj, buf, data);
2245 if (err) {
2246 pr_warn("error parsing system Kconfig line '%s': %d\n",
2247 buf, err);
166750bc
AN
2248 goto out;
2249 }
8601fd42 2250 }
166750bc 2251
8601fd42
AN
2252out:
2253 gzclose(file);
2254 return err;
2255}
166750bc 2256
8601fd42
AN
2257static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2258 const char *config, void *data)
2259{
2260 char buf[PATH_MAX];
2261 int err = 0;
2262 FILE *file;
166750bc 2263
8601fd42
AN
2264 file = fmemopen((void *)config, strlen(config), "r");
2265 if (!file) {
2266 err = -errno;
2267 pr_warn("failed to open in-memory Kconfig: %d\n", err);
2268 return err;
2269 }
2270
2271 while (fgets(buf, sizeof(buf), file)) {
2272 err = bpf_object__process_kconfig_line(obj, buf, data);
2273 if (err) {
2274 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2275 buf, err);
166750bc
AN
2276 break;
2277 }
166750bc
AN
2278 }
2279
8601fd42 2280 fclose(file);
166750bc
AN
2281 return err;
2282}
2283
81bfdd08 2284static int bpf_object__init_kconfig_map(struct bpf_object *obj)
166750bc 2285{
2e33efe3 2286 struct extern_desc *last_ext = NULL, *ext;
166750bc 2287 size_t map_sz;
2e33efe3 2288 int i, err;
166750bc 2289
2e33efe3
AN
2290 for (i = 0; i < obj->nr_extern; i++) {
2291 ext = &obj->externs[i];
2292 if (ext->type == EXT_KCFG)
2293 last_ext = ext;
2294 }
166750bc 2295
2e33efe3
AN
2296 if (!last_ext)
2297 return 0;
166750bc 2298
2e33efe3 2299 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
81bfdd08 2300 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
aed65917 2301 ".kconfig", obj->efile.symbols_shndx,
166750bc
AN
2302 NULL, map_sz);
2303 if (err)
2304 return err;
2305
81bfdd08 2306 obj->kconfig_map_idx = obj->nr_maps - 1;
166750bc
AN
2307
2308 return 0;
2309}
2310
42869d28 2311const struct btf_type *
ddc7c304 2312skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
abd29c93
AN
2313{
2314 const struct btf_type *t = btf__type_by_id(btf, id);
8837fe5d 2315
ddc7c304
AN
2316 if (res_id)
2317 *res_id = id;
2318
2319 while (btf_is_mod(t) || btf_is_typedef(t)) {
2320 if (res_id)
2321 *res_id = t->type;
2322 t = btf__type_by_id(btf, t->type);
abd29c93 2323 }
ddc7c304
AN
2324
2325 return t;
abd29c93
AN
2326}
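A small worked example of the peeling above (hypothetical type):

	/* typedef const struct foo my_foo_t;
	 *
	 * skip_mods_and_typedefs(btf, <id of my_foo_t>, &res_id) walks
	 * TYPEDEF(my_foo_t) -> CONST -> STRUCT foo and returns the STRUCT,
	 * with res_id set to the STRUCT's BTF type ID.
	 */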
2327
590a0088
MKL
2328static const struct btf_type *
2329resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2330{
2331 const struct btf_type *t;
2332
2333 t = skip_mods_and_typedefs(btf, id, NULL);
2334 if (!btf_is_ptr(t))
2335 return NULL;
2336
2337 t = skip_mods_and_typedefs(btf, t->type, res_id);
2338
2339 return btf_is_func_proto(t) ? t : NULL;
2340}
2341
774e132e 2342static const char *__btf_kind_str(__u16 kind)
81ba0889 2343{
774e132e 2344 switch (kind) {
81ba0889
AN
2345 case BTF_KIND_UNKN: return "void";
2346 case BTF_KIND_INT: return "int";
2347 case BTF_KIND_PTR: return "ptr";
2348 case BTF_KIND_ARRAY: return "array";
2349 case BTF_KIND_STRUCT: return "struct";
2350 case BTF_KIND_UNION: return "union";
2351 case BTF_KIND_ENUM: return "enum";
2352 case BTF_KIND_FWD: return "fwd";
2353 case BTF_KIND_TYPEDEF: return "typedef";
2354 case BTF_KIND_VOLATILE: return "volatile";
2355 case BTF_KIND_CONST: return "const";
2356 case BTF_KIND_RESTRICT: return "restrict";
2357 case BTF_KIND_FUNC: return "func";
2358 case BTF_KIND_FUNC_PROTO: return "func_proto";
2359 case BTF_KIND_VAR: return "var";
2360 case BTF_KIND_DATASEC: return "datasec";
22541a9e 2361 case BTF_KIND_FLOAT: return "float";
223f903e 2362 case BTF_KIND_DECL_TAG: return "decl_tag";
2dc1e488 2363 case BTF_KIND_TYPE_TAG: return "type_tag";
f2a62588 2364 case BTF_KIND_ENUM64: return "enum64";
81ba0889
AN
2365 default: return "unknown";
2366 }
2367}
2368
42869d28 2369const char *btf_kind_str(const struct btf_type *t)
774e132e
MKL
2370{
2371 return __btf_kind_str(btf_kind(t));
2372}
2373
ef99b02b
AN
2374/*
2375 * Fetch integer attribute of BTF map definition. Such attributes are
2376 * represented using a pointer to an array, in which dimensionality of array
2377 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2378 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2379 * type definition, while using only sizeof(void *) space in ELF data section.
2380 */
2381static bool get_map_field_int(const char *map_name, const struct btf *btf,
8983b731
AN
2382 const struct btf_member *m, __u32 *res)
2383{
ddc7c304 2384 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
abd29c93 2385 const char *name = btf__name_by_offset(btf, m->name_off);
ef99b02b
AN
2386 const struct btf_array *arr_info;
2387 const struct btf_type *arr_t;
abd29c93 2388
b03bc685 2389 if (!btf_is_ptr(t)) {
81ba0889
AN
2390 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2391 map_name, name, btf_kind_str(t));
abd29c93
AN
2392 return false;
2393 }
ef99b02b
AN
2394
2395 arr_t = btf__type_by_id(btf, t->type);
2396 if (!arr_t) {
be18010e
KW
2397 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2398 map_name, name, t->type);
abd29c93
AN
2399 return false;
2400 }
b03bc685 2401 if (!btf_is_array(arr_t)) {
81ba0889
AN
2402 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2403 map_name, name, btf_kind_str(arr_t));
abd29c93
AN
2404 return false;
2405 }
b03bc685 2406 arr_info = btf_array(arr_t);
ef99b02b 2407 *res = arr_info->nelems;
abd29c93
AN
2408 return true;
2409}
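This pointer-to-array encoding is what the __uint() macro from bpf_helpers.h emits:

	/* #define __uint(name, val) int (*name)[val] */
	struct {
		int (*type)[BPF_MAP_TYPE_ARRAY]; /* array nelems == 2 == BPF_MAP_TYPE_ARRAY */
		int (*max_entries)[256];         /* array nelems == 256 */
	} example_def; /* illustrative */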
2410
d147357e
AS
2411static bool get_map_field_long(const char *map_name, const struct btf *btf,
2412 const struct btf_member *m, __u64 *res)
2413{
2414 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2415 const char *name = btf__name_by_offset(btf, m->name_off);
2416
2417 if (btf_is_ptr(t)) {
2418 __u32 res32;
2419 bool ret;
2420
2421 ret = get_map_field_int(map_name, btf, m, &res32);
2422 if (ret)
2423 *res = (__u64)res32;
2424 return ret;
2425 }
2426
2427 if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2428 pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2429 map_name, name, btf_kind_str(t));
2430 return false;
2431 }
2432
2433 if (btf_vlen(t) != 1) {
2434 pr_warn("map '%s': attr '%s': invalid __ulong\n",
2435 map_name, name);
2436 return false;
2437 }
2438
2439 if (btf_is_enum(t)) {
2440 const struct btf_enum *e = btf_enum(t);
2441
2442 *res = e->val;
2443 } else {
2444 const struct btf_enum64 *e = btf_enum64(t);
2445
2446 *res = btf_enum64_value(e);
2447 }
2448 return true;
2449}
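64-bit attributes take the ENUM/ENUM64 branch above; bpf_helpers.h's __ulong() encodes the value as a single-entry anonymous enum, roughly:

	/* #define __ulong(name, val) \
	 *         enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
	 *
	 * e.g. __ulong(map_extra, 1ULL << 32) in an arena map definition becomes
	 * an ENUM64 with vlen == 1 whose sole value is read back here.
	 */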
2450
e588c116
WY
2451static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2452{
2453 int len;
2454
2455 len = snprintf(buf, buf_sz, "%s/%s", path, name);
2456 if (len < 0)
2457 return -EINVAL;
2458 if (len >= buf_sz)
2459 return -ENAMETOOLONG;
2460
2461 return 0;
2462}
2463
57a00f41
THJ
2464static int build_map_pin_path(struct bpf_map *map, const char *path)
2465{
2466 char buf[PATH_MAX];
e588c116 2467 int err;
57a00f41
THJ
2468
2469 if (!path)
6b434b61 2470 path = BPF_FS_DEFAULT_PATH;
57a00f41 2471
e588c116
WY
2472 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2473 if (err)
2474 return err;
57a00f41 2475
6e9cab2e 2476 return bpf_map__set_pin_path(map, buf);
57a00f41
THJ
2477}
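E.g., with no pin_root_path override (map name illustrative):

	/* path = NULL -> BPF_FS_DEFAULT_PATH ("/sys/fs/bpf")
	 * bpf_map__name(map) = "my_map"
	 * => pin path set to "/sys/fs/bpf/my_map"
	 */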
2478
146bf811
AN
2479/* should match definition in bpf_helpers.h */
2480enum libbpf_pin_type {
2481 LIBBPF_PIN_NONE,
2482 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2483 LIBBPF_PIN_BY_NAME,
2484};
2485
c7ef5ec9
AN
2486int parse_btf_map_def(const char *map_name, struct btf *btf,
2487 const struct btf_type *def_t, bool strict,
2488 struct btf_map_def *map_def, struct btf_map_def *inner_def)
abd29c93 2489{
41017e56 2490 const struct btf_type *t;
abd29c93 2491 const struct btf_member *m;
c7ef5ec9 2492 bool is_inner = inner_def == NULL;
abd29c93
AN
2493 int vlen, i;
2494
c7ef5ec9
AN
2495 vlen = btf_vlen(def_t);
2496 m = btf_members(def_t);
abd29c93 2497 for (i = 0; i < vlen; i++, m++) {
c7ef5ec9 2498 const char *name = btf__name_by_offset(btf, m->name_off);
abd29c93
AN
2499
2500 if (!name) {
c7ef5ec9 2501 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
abd29c93
AN
2502 return -EINVAL;
2503 }
2504 if (strcmp(name, "type") == 0) {
c7ef5ec9 2505 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
abd29c93 2506 return -EINVAL;
c7ef5ec9 2507 map_def->parts |= MAP_DEF_MAP_TYPE;
abd29c93 2508 } else if (strcmp(name, "max_entries") == 0) {
c7ef5ec9 2509 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
abd29c93 2510 return -EINVAL;
c7ef5ec9 2511 map_def->parts |= MAP_DEF_MAX_ENTRIES;
abd29c93 2512 } else if (strcmp(name, "map_flags") == 0) {
c7ef5ec9 2513 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
abd29c93 2514 return -EINVAL;
c7ef5ec9 2515 map_def->parts |= MAP_DEF_MAP_FLAGS;
1bdb6c9a 2516 } else if (strcmp(name, "numa_node") == 0) {
c7ef5ec9 2517 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
1bdb6c9a 2518 return -EINVAL;
c7ef5ec9 2519 map_def->parts |= MAP_DEF_NUMA_NODE;
abd29c93
AN
2520 } else if (strcmp(name, "key_size") == 0) {
2521 __u32 sz;
2522
c7ef5ec9 2523 if (!get_map_field_int(map_name, btf, m, &sz))
abd29c93 2524 return -EINVAL;
c7ef5ec9 2525 if (map_def->key_size && map_def->key_size != sz) {
be18010e 2526 pr_warn("map '%s': conflicting key size %u != %u.\n",
c7ef5ec9 2527 map_name, map_def->key_size, sz);
abd29c93
AN
2528 return -EINVAL;
2529 }
c7ef5ec9
AN
2530 map_def->key_size = sz;
2531 map_def->parts |= MAP_DEF_KEY_SIZE;
abd29c93
AN
2532 } else if (strcmp(name, "key") == 0) {
2533 __s64 sz;
2534
c7ef5ec9 2535 t = btf__type_by_id(btf, m->type);
abd29c93 2536 if (!t) {
be18010e 2537 pr_warn("map '%s': key type [%d] not found.\n",
c7ef5ec9 2538 map_name, m->type);
abd29c93
AN
2539 return -EINVAL;
2540 }
b03bc685 2541 if (!btf_is_ptr(t)) {
81ba0889 2542 pr_warn("map '%s': key spec is not PTR: %s.\n",
c7ef5ec9 2543 map_name, btf_kind_str(t));
abd29c93
AN
2544 return -EINVAL;
2545 }
c7ef5ec9 2546 sz = btf__resolve_size(btf, t->type);
abd29c93 2547 if (sz < 0) {
679152d3 2548 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
c7ef5ec9 2549 map_name, t->type, (ssize_t)sz);
abd29c93
AN
2550 return sz;
2551 }
c7ef5ec9 2552 if (map_def->key_size && map_def->key_size != sz) {
679152d3 2553 pr_warn("map '%s': conflicting key size %u != %zd.\n",
c7ef5ec9 2554 map_name, map_def->key_size, (ssize_t)sz);
abd29c93
AN
2555 return -EINVAL;
2556 }
c7ef5ec9
AN
2557 map_def->key_size = sz;
2558 map_def->key_type_id = t->type;
2559 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
abd29c93
AN
2560 } else if (strcmp(name, "value_size") == 0) {
2561 __u32 sz;
2562
c7ef5ec9 2563 if (!get_map_field_int(map_name, btf, m, &sz))
abd29c93 2564 return -EINVAL;
c7ef5ec9 2565 if (map_def->value_size && map_def->value_size != sz) {
be18010e 2566 pr_warn("map '%s': conflicting value size %u != %u.\n",
c7ef5ec9 2567 map_name, map_def->value_size, sz);
abd29c93
AN
2568 return -EINVAL;
2569 }
c7ef5ec9
AN
2570 map_def->value_size = sz;
2571 map_def->parts |= MAP_DEF_VALUE_SIZE;
abd29c93
AN
2572 } else if (strcmp(name, "value") == 0) {
2573 __s64 sz;
2574
c7ef5ec9 2575 t = btf__type_by_id(btf, m->type);
abd29c93 2576 if (!t) {
be18010e 2577 pr_warn("map '%s': value type [%d] not found.\n",
c7ef5ec9 2578 map_name, m->type);
abd29c93
AN
2579 return -EINVAL;
2580 }
b03bc685 2581 if (!btf_is_ptr(t)) {
81ba0889 2582 pr_warn("map '%s': value spec is not PTR: %s.\n",
c7ef5ec9 2583 map_name, btf_kind_str(t));
abd29c93
AN
2584 return -EINVAL;
2585 }
c7ef5ec9 2586 sz = btf__resolve_size(btf, t->type);
abd29c93 2587 if (sz < 0) {
679152d3 2588 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
c7ef5ec9 2589 map_name, t->type, (ssize_t)sz);
abd29c93
AN
2590 return sz;
2591 }
c7ef5ec9 2592 if (map_def->value_size && map_def->value_size != sz) {
679152d3 2593 pr_warn("map '%s': conflicting value size %u != %zd.\n",
c7ef5ec9 2594 map_name, map_def->value_size, (ssize_t)sz);
abd29c93
AN
2595 return -EINVAL;
2596 }
c7ef5ec9
AN
2597 map_def->value_size = sz;
2598 map_def->value_type_id = t->type;
2599 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
646f02ff
AN
2600 }
2601 else if (strcmp(name, "values") == 0) {
341ac5ff
HC
2602 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2603 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2604 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
c7ef5ec9 2605 char inner_map_name[128];
646f02ff
AN
2606 int err;
2607
2608 if (is_inner) {
2609 pr_warn("map '%s': multi-level inner maps not supported.\n",
c7ef5ec9 2610 map_name);
646f02ff
AN
2611 return -ENOTSUP;
2612 }
2613 if (i != vlen - 1) {
2614 pr_warn("map '%s': '%s' member should be last.\n",
c7ef5ec9 2615 map_name, name);
646f02ff
AN
2616 return -EINVAL;
2617 }
341ac5ff
HC
2618 if (!is_map_in_map && !is_prog_array) {
2619 pr_warn("map '%s': should be map-in-map or prog-array.\n",
c7ef5ec9 2620 map_name);
646f02ff
AN
2621 return -ENOTSUP;
2622 }
c7ef5ec9 2623 if (map_def->value_size && map_def->value_size != 4) {
646f02ff 2624 pr_warn("map '%s': conflicting value size %u != 4.\n",
c7ef5ec9 2625 map_name, map_def->value_size);
646f02ff
AN
2626 return -EINVAL;
2627 }
c7ef5ec9
AN
2628 map_def->value_size = 4;
2629 t = btf__type_by_id(btf, m->type);
646f02ff 2630 if (!t) {
341ac5ff
HC
2631 pr_warn("map '%s': %s type [%d] not found.\n",
2632 map_name, desc, m->type);
646f02ff
AN
2633 return -EINVAL;
2634 }
2635 if (!btf_is_array(t) || btf_array(t)->nelems) {
341ac5ff
HC
2636 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2637 map_name, desc);
646f02ff
AN
2638 return -EINVAL;
2639 }
c7ef5ec9 2640 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
646f02ff 2641 if (!btf_is_ptr(t)) {
341ac5ff
HC
2642 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2643 map_name, desc, btf_kind_str(t));
646f02ff
AN
2644 return -EINVAL;
2645 }
c7ef5ec9 2646 t = skip_mods_and_typedefs(btf, t->type, NULL);
341ac5ff
HC
2647 if (is_prog_array) {
2648 if (!btf_is_func_proto(t)) {
2649 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2650 map_name, btf_kind_str(t));
2651 return -EINVAL;
2652 }
2653 continue;
2654 }
646f02ff 2655 if (!btf_is_struct(t)) {
81ba0889 2656 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
c7ef5ec9 2657 map_name, btf_kind_str(t));
646f02ff
AN
2658 return -EINVAL;
2659 }
2660
c7ef5ec9
AN
2661 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2662 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
646f02ff
AN
2663 if (err)
2664 return err;
c7ef5ec9
AN
2665
2666 map_def->parts |= MAP_DEF_INNER_MAP;
57a00f41
THJ
2667 } else if (strcmp(name, "pinning") == 0) {
2668 __u32 val;
57a00f41 2669
646f02ff 2670 if (is_inner) {
c7ef5ec9 2671 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
646f02ff
AN
2672 return -EINVAL;
2673 }
c7ef5ec9 2674 if (!get_map_field_int(map_name, btf, m, &val))
57a00f41 2675 return -EINVAL;
c7ef5ec9 2676 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
57a00f41 2677 pr_warn("map '%s': invalid pinning value %u.\n",
c7ef5ec9 2678 map_name, val);
57a00f41
THJ
2679 return -EINVAL;
2680 }
c7ef5ec9
AN
2681 map_def->pinning = val;
2682 map_def->parts |= MAP_DEF_PINNING;
47512102 2683 } else if (strcmp(name, "map_extra") == 0) {
d147357e 2684 __u64 map_extra;
47512102 2685
d147357e 2686 if (!get_map_field_long(map_name, btf, m, &map_extra))
47512102
JK
2687 return -EINVAL;
2688 map_def->map_extra = map_extra;
2689 map_def->parts |= MAP_DEF_MAP_EXTRA;
abd29c93
AN
2690 } else {
2691 if (strict) {
c7ef5ec9 2692 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
abd29c93
AN
2693 return -ENOTSUP;
2694 }
c7ef5ec9 2695 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
abd29c93
AN
2696 }
2697 }
2698
c7ef5ec9
AN
2699 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2700 pr_warn("map '%s': map type isn't specified.\n", map_name);
abd29c93
AN
2701 return -EINVAL;
2702 }
2703
2704 return 0;
2705}
2706
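Putting the recognized fields together, a typical BPF-side definition that this parser consumes (struct my_value is hypothetical; macros are from bpf_helpers.h):

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 1024);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, __u32);
		__type(value, struct my_value);
		__uint(pinning, LIBBPF_PIN_BY_NAME);
	} my_map SEC(".maps");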
597fbc46
AN
2707static size_t adjust_ringbuf_sz(size_t sz)
2708{
2709 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2710 __u32 mul;
2711
2712 /* if user forgot to set any size, make sure they see error */
2713 if (sz == 0)
2714 return 0;
2715 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2716 * a power-of-2 multiple of kernel's page size. If the user diligently
2717 * satisfied these conditions, pass the size through.
2718 */
2719 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2720 return sz;
2721
2722 /* Otherwise find closest (page_sz * power_of_2) product bigger than
2723 * user-set size to satisfy both user size request and kernel
2724 * requirements and substitute correct max_entries for map creation.
2725 */
2726 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2727 if (mul * page_sz > sz)
2728 return mul * page_sz;
2729 }
2730
2731 /* if it's impossible to satisfy the conditions (i.e., user size is
2732 * very close to UINT_MAX but is not a power-of-2 multiple of
2733 * page_size) then just return original size and let kernel reject it
2734 */
2735 return sz;
2736}
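A few concrete inputs for the rounding logic above, assuming 4096-byte pages:

	/* adjust_ringbuf_sz(4096)  == 4096  (1 page, already a power-of-2 multiple)
	 * adjust_ringbuf_sz(5000)  == 8192  (smallest page_sz * 2^n above 5000)
	 * adjust_ringbuf_sz(12288) == 16384 (3 pages isn't a power-of-2 multiple)
	 * adjust_ringbuf_sz(0)     == 0     (left as-is so the kernel reports the error)
	 */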
2737
b66ccae0
DV
2738static bool map_is_ringbuf(const struct bpf_map *map)
2739{
2740 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2741 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2742}
2743
c7ef5ec9
AN
2744static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2745{
2746 map->def.type = def->map_type;
2747 map->def.key_size = def->key_size;
2748 map->def.value_size = def->value_size;
2749 map->def.max_entries = def->max_entries;
2750 map->def.map_flags = def->map_flags;
47512102 2751 map->map_extra = def->map_extra;
c7ef5ec9
AN
2752
2753 map->numa_node = def->numa_node;
2754 map->btf_key_type_id = def->key_type_id;
2755 map->btf_value_type_id = def->value_type_id;
2756
597fbc46 2757 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
b66ccae0 2758 if (map_is_ringbuf(map))
597fbc46
AN
2759 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2760
c7ef5ec9
AN
2761 if (def->parts & MAP_DEF_MAP_TYPE)
2762 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2763
2764 if (def->parts & MAP_DEF_KEY_TYPE)
2765 pr_debug("map '%s': found key [%u], sz = %u.\n",
2766 map->name, def->key_type_id, def->key_size);
2767 else if (def->parts & MAP_DEF_KEY_SIZE)
2768 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2769
2770 if (def->parts & MAP_DEF_VALUE_TYPE)
2771 pr_debug("map '%s': found value [%u], sz = %u.\n",
2772 map->name, def->value_type_id, def->value_size);
2773 else if (def->parts & MAP_DEF_VALUE_SIZE)
2774 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2775
2776 if (def->parts & MAP_DEF_MAX_ENTRIES)
2777 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2778 if (def->parts & MAP_DEF_MAP_FLAGS)
47512102
JK
2779 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2780 if (def->parts & MAP_DEF_MAP_EXTRA)
2781 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2782 (unsigned long long)def->map_extra);
c7ef5ec9
AN
2783 if (def->parts & MAP_DEF_PINNING)
2784 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2785 if (def->parts & MAP_DEF_NUMA_NODE)
2786 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2787
2788 if (def->parts & MAP_DEF_INNER_MAP)
2789 pr_debug("map '%s': found inner map definition.\n", map->name);
2790}
2791
c1cccec9
AN
2792static const char *btf_var_linkage_str(__u32 linkage)
2793{
2794 switch (linkage) {
2795 case BTF_VAR_STATIC: return "static";
2796 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2797 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2798 default: return "unknown";
2799 }
2800}
2801
41017e56
AN
2802static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2803 const struct btf_type *sec,
2804 int var_idx, int sec_idx,
2805 const Elf_Data *data, bool strict,
2806 const char *pin_root_path)
2807{
c7ef5ec9 2808 struct btf_map_def map_def = {}, inner_def = {};
41017e56
AN
2809 const struct btf_type *var, *def;
2810 const struct btf_var_secinfo *vi;
2811 const struct btf_var *var_extra;
2812 const char *map_name;
2813 struct bpf_map *map;
c7ef5ec9 2814 int err;
41017e56
AN
2815
2816 vi = btf_var_secinfos(sec) + var_idx;
2817 var = btf__type_by_id(obj->btf, vi->type);
2818 var_extra = btf_var(var);
2819 map_name = btf__name_by_offset(obj->btf, var->name_off);
2820
2821 if (map_name == NULL || map_name[0] == '\0') {
2822 pr_warn("map #%d: empty name.\n", var_idx);
2823 return -EINVAL;
2824 }
2825 if ((__u64)vi->offset + vi->size > data->d_size) {
2826 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2827 return -EINVAL;
2828 }
2829 if (!btf_is_var(var)) {
81ba0889
AN
2830 pr_warn("map '%s': unexpected var kind %s.\n",
2831 map_name, btf_kind_str(var));
41017e56
AN
2832 return -EINVAL;
2833 }
c1cccec9
AN
2834 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2835 pr_warn("map '%s': unsupported map linkage %s.\n",
2836 map_name, btf_var_linkage_str(var_extra->linkage));
41017e56
AN
2837 return -EOPNOTSUPP;
2838 }
2839
2840 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2841 if (!btf_is_struct(def)) {
81ba0889
AN
2842 pr_warn("map '%s': unexpected def kind %s.\n",
2843 map_name, btf_kind_str(var));
41017e56
AN
2844 return -EINVAL;
2845 }
2846 if (def->size > vi->size) {
2847 pr_warn("map '%s': invalid def size.\n", map_name);
2848 return -EINVAL;
2849 }
2850
2851 map = bpf_object__add_map(obj);
2852 if (IS_ERR(map))
2853 return PTR_ERR(map);
2854 map->name = strdup(map_name);
2855 if (!map->name) {
2856 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2857 return -ENOMEM;
2858 }
2859 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2860 map->def.type = BPF_MAP_TYPE_UNSPEC;
2861 map->sec_idx = sec_idx;
2862 map->sec_offset = vi->offset;
646f02ff 2863 map->btf_var_idx = var_idx;
41017e56
AN
2864 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2865 map_name, map->sec_idx, map->sec_offset);
2866
c7ef5ec9
AN
2867 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2868 if (err)
2869 return err;
2870
2871 fill_map_from_def(map, &map_def);
2872
2873 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2874 err = build_map_pin_path(map, pin_root_path);
2875 if (err) {
2876 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2877 return err;
2878 }
2879 }
2880
2881 if (map_def.parts & MAP_DEF_INNER_MAP) {
2882 map->inner_map = calloc(1, sizeof(*map->inner_map));
2883 if (!map->inner_map)
2884 return -ENOMEM;
dac645b9
AN
2885 map->inner_map->fd = create_placeholder_fd();
2886 if (map->inner_map->fd < 0)
2887 return map->inner_map->fd;
c7ef5ec9
AN
2888 map->inner_map->sec_idx = sec_idx;
2889 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2890 if (!map->inner_map->name)
2891 return -ENOMEM;
2892 sprintf(map->inner_map->name, "%s.inner", map_name);
2893
2894 fill_map_from_def(map->inner_map, &inner_def);
2895 }
2896
4fcac46c 2897 err = map_fill_btf_type_info(obj, map);
262cfb74
DK
2898 if (err)
2899 return err;
2900
c7ef5ec9 2901 return 0;
41017e56
AN
2902}
2903
2e7ba4f8
AN
2904static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2905 const char *sec_name, int sec_idx,
2906 void *data, size_t data_sz)
2907{
2908 const long page_sz = sysconf(_SC_PAGE_SIZE);
2909 size_t mmap_sz;
2910
2911 mmap_sz = bpf_map_mmap_sz(obj->arena_map);
2912 if (roundup(data_sz, page_sz) > mmap_sz) {
2913 pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
2914 sec_name, mmap_sz, data_sz);
2915 return -E2BIG;
2916 }
2917
2918 obj->arena_data = malloc(data_sz);
2919 if (!obj->arena_data)
2920 return -ENOMEM;
2921 memcpy(obj->arena_data, data, data_sz);
2922 obj->arena_data_sz = data_sz;
2923
2924 /* make bpf_map__init_value() work for ARENA maps */
2925 map->mmaped = obj->arena_data;
2926
2927 return 0;
2928}
2929
57a00f41
THJ
2930static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2931 const char *pin_root_path)
abd29c93
AN
2932{
2933 const struct btf_type *sec = NULL;
2934 int nr_types, i, vlen, err;
2935 const struct btf_type *t;
2936 const char *name;
2937 Elf_Data *data;
2938 Elf_Scn *scn;
2939
2940 if (obj->efile.btf_maps_shndx < 0)
2941 return 0;
2942
88a82120
AN
2943 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2944 data = elf_sec_data(obj, scn);
abd29c93 2945 if (!scn || !data) {
88a82120
AN
2946 pr_warn("elf: failed to get %s map definitions for %s\n",
2947 MAPS_ELF_SEC, obj->path);
abd29c93
AN
2948 return -EINVAL;
2949 }
2950
6a886de0
HC
2951 nr_types = btf__type_cnt(obj->btf);
2952 for (i = 1; i < nr_types; i++) {
abd29c93 2953 t = btf__type_by_id(obj->btf, i);
b03bc685 2954 if (!btf_is_datasec(t))
abd29c93
AN
2955 continue;
2956 name = btf__name_by_offset(obj->btf, t->name_off);
2957 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2958 sec = t;
646f02ff 2959 obj->efile.btf_maps_sec_btf_id = i;
abd29c93
AN
2960 break;
2961 }
2962 }
2963
2964 if (!sec) {
be18010e 2965 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
abd29c93
AN
2966 return -ENOENT;
2967 }
2968
b03bc685 2969 vlen = btf_vlen(sec);
abd29c93
AN
2970 for (i = 0; i < vlen; i++) {
2971 err = bpf_object__init_user_btf_map(obj, sec, i,
2972 obj->efile.btf_maps_shndx,
8983b731
AN
2973 data, strict,
2974 pin_root_path);
abd29c93
AN
2975 if (err)
2976 return err;
2977 }
2978
2e7ba4f8
AN
2979 for (i = 0; i < obj->nr_maps; i++) {
2980 struct bpf_map *map = &obj->maps[i];
2981
2982 if (map->def.type != BPF_MAP_TYPE_ARENA)
2983 continue;
2984
2985 if (obj->arena_map) {
2986 pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
2987 map->name, obj->arena_map->name);
2988 return -EINVAL;
2989 }
2990 obj->arena_map = map;
2991
2992 if (obj->efile.arena_data) {
2993 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
2994 obj->efile.arena_data->d_buf,
2995 obj->efile.arena_data->d_size);
2996 if (err)
2997 return err;
2998 }
2999 }
3000 if (obj->efile.arena_data && !obj->arena_map) {
3001 pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
3002 ARENA_SEC);
3003 return -ENOENT;
3004 }
3005
abd29c93
AN
3006 return 0;
3007}
3008
0d13bfce 3009static int bpf_object__init_maps(struct bpf_object *obj,
01af3bf0 3010 const struct bpf_object_open_opts *opts)
bf829271 3011{
166750bc
AN
3012 const char *pin_root_path;
3013 bool strict;
bd054102 3014 int err = 0;
8837fe5d 3015
166750bc
AN
3016 strict = !OPTS_GET(opts, relaxed_maps, false);
3017 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
bf829271 3018
40e1bcab 3019 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
166750bc 3020 err = err ?: bpf_object__init_global_data_maps(obj);
81bfdd08 3021 err = err ?: bpf_object__init_kconfig_map(obj);
809a69d6 3022 err = err ?: bpf_object_init_struct_ops(obj);
bf829271 3023
3b3af91c 3024 return err;
561bbcca
WN
3025}
3026
e3d91b0c
JDB
3027static bool section_have_execinstr(struct bpf_object *obj, int idx)
3028{
ad23b723 3029 Elf64_Shdr *sh;
e3d91b0c 3030
ad23b723
AN
3031 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
3032 if (!sh)
e3d91b0c
JDB
3033 return false;
3034
ad23b723 3035 return sh->sh_flags & SHF_EXECINSTR;
e3d91b0c
JDB
3036}
3037
6ebaa3fb
EZ
3038static bool starts_with_qmark(const char *s)
3039{
3040 return s && s[0] == '?';
3041}
3042
0f0e55d8
AN
3043static bool btf_needs_sanitization(struct bpf_object *obj)
3044{
9ca1f56a
AS
3045 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3046 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3047 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3048 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
223f903e 3049 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2dc1e488 3050 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
f2a62588 3051 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
6ebaa3fb 3052 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
0f0e55d8 3053
2dc1e488 3054 return !has_func || !has_datasec || !has_func_global || !has_float ||
6ebaa3fb 3055 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
0f0e55d8
AN
3056}
3057
f2a62588 3058static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
d7c4b398 3059{
9ca1f56a
AS
3060 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3061 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3062 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3063 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
223f903e 3064 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2dc1e488 3065 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
f2a62588 3066 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
6ebaa3fb 3067 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
c49a44b3 3068 int enum64_placeholder_id = 0;
d7c4b398
AN
3069 struct btf_type *t;
3070 int i, j, vlen;
d7c4b398 3071
6a886de0 3072 for (i = 1; i < btf__type_cnt(btf); i++) {
d7c4b398 3073 t = (struct btf_type *)btf__type_by_id(btf, i);
d7c4b398 3074
223f903e
YS
3075 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3076 /* replace VAR/DECL_TAG with INT */
d7c4b398 3077 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1d4126c4
AN
3078 /*
3079 * using size = 1 is the safest choice, 4 will be too
3080 * big and cause kernel BTF validation failure if
3081 * original variable took less than 4 bytes
3082 */
3083 t->size = 1;
708852dc 3084 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
b03bc685 3085 } else if (!has_datasec && btf_is_datasec(t)) {
d7c4b398 3086 /* replace DATASEC with STRUCT */
b03bc685
AN
3087 const struct btf_var_secinfo *v = btf_var_secinfos(t);
3088 struct btf_member *m = btf_members(t);
d7c4b398
AN
3089 struct btf_type *vt;
3090 char *name;
3091
3092 name = (char *)btf__name_by_offset(btf, t->name_off);
3093 while (*name) {
6ebaa3fb 3094 if (*name == '.' || *name == '?')
d7c4b398
AN
3095 *name = '_';
3096 name++;
3097 }
3098
b03bc685 3099 vlen = btf_vlen(t);
d7c4b398
AN
3100 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3101 for (j = 0; j < vlen; j++, v++, m++) {
3102 /* order of field assignments is important */
3103 m->offset = v->offset * 8;
3104 m->type = v->type;
3105 /* preserve variable name as member name */
3106 vt = (void *)btf__type_by_id(btf, v->type);
3107 m->name_off = vt->name_off;
3108 }
6ebaa3fb
EZ
3109 } else if (!has_qmark_datasec && btf_is_datasec(t) &&
3110 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3111 /* replace '?' prefix with '_' for DATASEC names */
3112 char *name;
3113
3114 name = (char *)btf__name_by_offset(btf, t->name_off);
3115 if (name[0] == '?')
3116 name[0] = '_';
b03bc685 3117 } else if (!has_func && btf_is_func_proto(t)) {
d7c4b398 3118 /* replace FUNC_PROTO with ENUM */
b03bc685 3119 vlen = btf_vlen(t);
d7c4b398
AN
3120 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3121 t->size = sizeof(__u32); /* kernel enforced */
b03bc685 3122 } else if (!has_func && btf_is_func(t)) {
d7c4b398
AN
3123 /* replace FUNC with TYPEDEF */
3124 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2d3eb67f
AS
3125 } else if (!has_func_global && btf_is_func(t)) {
3126 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3127 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
22541a9e
IL
3128 } else if (!has_float && btf_is_float(t)) {
3129 /* replace FLOAT with an equally-sized empty STRUCT;
3130 * since C compilers do not accept e.g. "float" as a
3131 * valid struct name, make it anonymous
3132 */
3133 t->name_off = 0;
3134 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2dc1e488
YS
3135 } else if (!has_type_tag && btf_is_type_tag(t)) {
3136 /* replace TYPE_TAG with a CONST */
3137 t->name_off = 0;
3138 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
f2a62588
YS
3139 } else if (!has_enum64 && btf_is_enum(t)) {
3140 /* clear the kflag */
3141 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3142 } else if (!has_enum64 && btf_is_enum64(t)) {
3143 /* replace ENUM64 with a union */
3144 struct btf_member *m;
3145
3146 if (enum64_placeholder_id == 0) {
3147 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3148 if (enum64_placeholder_id < 0)
3149 return enum64_placeholder_id;
3150
3151 t = (struct btf_type *)btf__type_by_id(btf, i);
3152 }
3153
3154 m = btf_members(t);
3155 vlen = btf_vlen(t);
3156 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3157 for (j = 0; j < vlen; j++, m++) {
3158 m->type = enum64_placeholder_id;
3159 m->offset = 0;
3160 }
e3ba8e4e 3161 }
d7c4b398 3162 }
f2a62588
YS
3163
3164 return 0;
d7c4b398
AN
3165}
3166
b35f14f4 3167static bool libbpf_needs_btf(const struct bpf_object *obj)
abd29c93 3168{
b35f14f4 3169 return obj->efile.btf_maps_shndx >= 0 ||
240bf8a5 3170 obj->efile.has_st_ops ||
b35f14f4
AN
3171 obj->nr_extern > 0;
3172}
3173
3174static bool kernel_needs_btf(const struct bpf_object *obj)
3175{
240bf8a5 3176 return obj->efile.has_st_ops;
abd29c93
AN
3177}
3178
063183bf 3179static int bpf_object__init_btf(struct bpf_object *obj,
9c6660d0
AN
3180 Elf_Data *btf_data,
3181 Elf_Data *btf_ext_data)
3182{
b7d7f3e1 3183 int err = -ENOENT;
9c6660d0
AN
3184
3185 if (btf_data) {
3186 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
e9fc3ce9
AN
3187 err = libbpf_get_error(obj->btf);
3188 if (err) {
b7d7f3e1 3189 obj->btf = NULL;
e9fc3ce9 3190 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
9c6660d0
AN
3191 goto out;
3192 }
4c01925f
AN
3193 /* enforce 8-byte pointers for BPF-targeted BTFs */
3194 btf__set_pointer_size(obj->btf, 8);
9c6660d0
AN
3195 }
3196 if (btf_ext_data) {
11d5daa8
AN
3197 struct btf_ext_info *ext_segs[3];
3198 int seg_num, sec_num;
3199
9c6660d0
AN
3200 if (!obj->btf) {
3201 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
3202 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3203 goto out;
3204 }
e9fc3ce9
AN
3205 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3206 err = libbpf_get_error(obj->btf_ext);
3207 if (err) {
3208 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
3209 BTF_EXT_ELF_SEC, err);
9c6660d0
AN
3210 obj->btf_ext = NULL;
3211 goto out;
3212 }
11d5daa8
AN
3213
3214 /* setup .BTF.ext to ELF section mapping */
3215 ext_segs[0] = &obj->btf_ext->func_info;
3216 ext_segs[1] = &obj->btf_ext->line_info;
3217 ext_segs[2] = &obj->btf_ext->core_relo_info;
3218 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3219 struct btf_ext_info *seg = ext_segs[seg_num];
3220 const struct btf_ext_info_sec *sec;
3221 const char *sec_name;
3222 Elf_Scn *scn;
3223
3224 if (seg->sec_cnt == 0)
3225 continue;
3226
3227 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3228 if (!seg->sec_idxs) {
3229 err = -ENOMEM;
3230 goto out;
3231 }
3232
3233 sec_num = 0;
3234 for_each_btf_ext_sec(seg, sec) {
3235 /* preventively increment index to avoid doing
3236 * this before every continue below
3237 */
3238 sec_num++;
3239
3240 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3241 if (str_is_empty(sec_name))
3242 continue;
3243 scn = elf_sec_by_name(obj, sec_name);
3244 if (!scn)
3245 continue;
3246
3247 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3248 }
3249 }
9c6660d0
AN
3250 }
3251out:
b35f14f4 3252 if (err && libbpf_needs_btf(obj)) {
be18010e 3253 pr_warn("BTF is required, but is missing or corrupted.\n");
b7d7f3e1 3254 return err;
abd29c93 3255 }
9c6660d0
AN
3256 return 0;
3257}
3258
b96c07f3
AN
3259static int compare_vsi_off(const void *_a, const void *_b)
3260{
3261 const struct btf_var_secinfo *a = _a;
3262 const struct btf_var_secinfo *b = _b;
3263
3264 return a->offset - b->offset;
3265}
3266
3267static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3268 struct btf_type *t)
3269{
f33f742d
AN
3270 __u32 size = 0, i, vars = btf_vlen(t);
3271 const char *sec_name = btf__name_by_offset(btf, t->name_off);
b96c07f3 3272 struct btf_var_secinfo *vsi;
4fcac46c 3273 bool fixup_offsets = false;
f33f742d 3274 int err;
b96c07f3 3275
f33f742d 3276 if (!sec_name) {
b96c07f3
AN
3277 pr_debug("No name found in string section for DATASEC kind.\n");
3278 return -ENOENT;
3279 }
3280
4fcac46c
AN
3281 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3282 * variable offsets set at the previous step. Further, not every
3283 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3284 * all fixups altogether for such sections and go straight to sorting
3285 * VARs within their DATASEC.
b96c07f3 3286 */
4fcac46c 3287 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
b96c07f3
AN
3288 goto sort_vars;
3289
4fcac46c
AN
3290 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3291 * fix this up. But BPF static linker already fixes this up and fills
3292 * all the sizes and offsets during static linking. So this step has
3293 * to be optional. But the STV_HIDDEN handling is non-optional for any
3294 * non-extern DATASEC, so the variable fixup loop below handles both
3295 * tasks at the same time, paying the cost of BTF VAR <-> ELF
3296 * symbol matching just once.
3297 */
3298 if (t->size == 0) {
3299 err = find_elf_sec_sz(obj, sec_name, &size);
3300 if (err || !size) {
3301 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3302 sec_name, size, err);
3303 return -ENOENT;
3304 }
b96c07f3 3305
4fcac46c
AN
3306 t->size = size;
3307 fixup_offsets = true;
3308 }
b96c07f3
AN
3309
3310 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
f33f742d
AN
3311 const struct btf_type *t_var;
3312 struct btf_var *var;
3313 const char *var_name;
3314 Elf64_Sym *sym;
3315
b96c07f3 3316 t_var = btf__type_by_id(btf, vsi->type);
88918dc1 3317 if (!t_var || !btf_is_var(t_var)) {
f33f742d 3318 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
b96c07f3
AN
3319 return -EINVAL;
3320 }
3321
88918dc1 3322 var = btf_var(t_var);
f33f742d 3323 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
b96c07f3
AN
3324 continue;
3325
f33f742d
AN
3326 var_name = btf__name_by_offset(btf, t_var->name_off);
3327 if (!var_name) {
3328 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3329 sec_name, i);
b96c07f3
AN
3330 return -ENOENT;
3331 }
3332
f33f742d
AN
3333 sym = find_elf_var_sym(obj, var_name);
3334 if (IS_ERR(sym)) {
3335 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3336 sec_name, var_name);
b96c07f3
AN
3337 return -ENOENT;
3338 }
3339
4fcac46c
AN
3340 if (fixup_offsets)
3341 vsi->offset = sym->st_value;
3342
3343 /* if variable is a global/weak symbol, but has restricted
3344 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3345 * as static. This follows similar logic for functions (BPF
3346 * subprogs) and influences libbpf's further decisions about
3347 * whether to mark global data BPF array maps as
3348 * BPF_F_MMAPABLE.
3349 */
3350 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3351 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3352 var->linkage = BTF_VAR_STATIC;
b96c07f3
AN
3353 }
3354
3355sort_vars:
3356 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3357 return 0;
3358}
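
/* A minimal BPF-side sketch (hypothetical variable names) of what the
 * fixup above handles: Clang emits a ".data" DATASEC with size 0 and
 * VAR offsets 0; find_elf_sec_sz()/find_elf_var_sym() fill them in from
 * ELF, and the hidden variable's BTF VAR linkage is downgraded to
 * BTF_VAR_STATIC:
 *
 *	int exposed_counter = 1;	-- stays a global VAR
 *	__attribute__((visibility("hidden"))) int internal_state;
 */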
3359
f33f742d 3360static int bpf_object_fixup_btf(struct bpf_object *obj)
b96c07f3 3361{
f33f742d 3362 int i, n, err = 0;
b96c07f3 3363
f33f742d
AN
3364 if (!obj->btf)
3365 return 0;
3366
3367 n = btf__type_cnt(obj->btf);
6a886de0 3368 for (i = 1; i < n; i++) {
f33f742d 3369 struct btf_type *t = btf_type_by_id(obj->btf, i);
b96c07f3
AN
3370
3371 /* Loader needs to fix up some of the things compiler
3372 * couldn't get its hands on while emitting BTF. These
3373 * are section sizes and global variable offsets. We use
3374 * the info from the ELF itself for this purpose.
3375 */
3376 if (btf_is_datasec(t)) {
f33f742d 3377 err = btf_fixup_datasec(obj, obj->btf, t);
b96c07f3 3378 if (err)
f33f742d 3379 return err;
b96c07f3
AN
3380 }
3381 }
3382
166750bc
AN
3383 return 0;
3384}
3385
fe62de31 3386static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
a6ed02ca 3387{
1e092a03
KS
3388 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3389 prog->type == BPF_PROG_TYPE_LSM)
a6ed02ca
KS
3390 return true;
3391
3392 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3393 * also need vmlinux BTF
3394 */
3395 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3396 return true;
3397
3398 return false;
3399}
3400
8b7b0e5f
DV
3401static bool map_needs_vmlinux_btf(struct bpf_map *map)
3402{
3403 return bpf_map__is_struct_ops(map);
3404}
3405
fe62de31 3406static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
a6ed02ca
KS
3407{
3408 struct bpf_program *prog;
8b7b0e5f 3409 struct bpf_map *map;
fe62de31 3410 int i;
a6ed02ca 3411
1373ff59
SC
3412 /* CO-RE relocations need kernel BTF, but only when btf_custom_path
3413 * is not specified
3414 */
3415 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
fe62de31 3416 return true;
192b6638 3417
d370bbe1
HL
3418 /* Support for typed ksyms needs kernel BTF */
3419 for (i = 0; i < obj->nr_extern; i++) {
3420 const struct extern_desc *ext;
3421
3422 ext = &obj->externs[i];
fe62de31
AN
3423 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3424 return true;
d370bbe1
HL
3425 }
3426
a6ed02ca 3427 bpf_object__for_each_program(prog, obj) {
a3820c48 3428 if (!prog->autoload)
d9297581 3429 continue;
fe62de31
AN
3430 if (prog_needs_vmlinux_btf(prog))
3431 return true;
a6ed02ca
KS
3432 }
3433
8b7b0e5f
DV
3434 bpf_object__for_each_map(map, obj) {
3435 if (map_needs_vmlinux_btf(map))
3436 return true;
3437 }
3438
fe62de31
AN
3439 return false;
3440}
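
/* A quick sketch: any one of the following in a BPF object makes
 * obj_needs_vmlinux_btf() return true and triggers a vmlinux BTF load
 * (the typed ksym is just an illustrative example):
 *
 *	a SEC("struct_ops/...") or SEC("lsm/...") program
 *	a SEC("fentry/...") TRACING program with no attach_prog_fd
 *	extern const struct rq runqueues __ksym;
 *	a struct_ops map
 */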
3441
3442static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3443{
3444 int err;
3445
3446 /* btf_vmlinux could be loaded earlier */
67234743 3447 if (obj->btf_vmlinux || obj->gen_loader)
fe62de31
AN
3448 return 0;
3449
3450 if (!force && !obj_needs_vmlinux_btf(obj))
192b6638
AN
3451 return 0;
3452
a710eed3 3453 obj->btf_vmlinux = btf__load_vmlinux_btf();
e9fc3ce9
AN
3454 err = libbpf_get_error(obj->btf_vmlinux);
3455 if (err) {
192b6638
AN
3456 pr_warn("Error loading vmlinux BTF: %d\n", err);
3457 obj->btf_vmlinux = NULL;
3458 return err;
3459 }
a6ed02ca
KS
3460 return 0;
3461}
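
/* A standalone usage sketch of the same public API this helper relies
 * on: load kernel BTF, look up an arbitrary type ("task_struct" here),
 * then free it. Not part of libbpf proper.
 */
static void example_lookup_vmlinux_type(void)
{
	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
	int id;

	if (libbpf_get_error(vmlinux_btf))
		return;
	id = btf__find_by_name_kind(vmlinux_btf, "task_struct", BTF_KIND_STRUCT);
	if (id > 0)
		pr_debug("task_struct BTF id: %d\n", id);
	btf__free(vmlinux_btf);
}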
3462
063183bf
AN
3463static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3464{
0f0e55d8
AN
3465 struct btf *kern_btf = obj->btf;
3466 bool btf_mandatory, sanitize;
aea28a60 3467 int i, err = 0;
063183bf
AN
3468
3469 if (!obj->btf)
3470 return 0;
3471
9ca1f56a 3472 if (!kernel_supports(obj, FEAT_BTF)) {
68b08647
AN
3473 if (kernel_needs_btf(obj)) {
3474 err = -EOPNOTSUPP;
3475 goto report;
3476 }
3477 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3478 return 0;
3479 }
3480
aea28a60
AN
3481 /* Even though some subprogs are global/weak, the user might prefer
3482 * the more permissive BPF verification process that the BPF verifier
3483 * performs for static functions, taking into account more context
3484 * from the caller functions. In such a case, they need to mark such
3485 * subprogs with __attribute__((visibility("hidden"))) and libbpf will
3486 * adjust the corresponding FUNC BTF type to be marked as static,
3487 * triggering the more involved BPF verification process.
3488 */
3489 for (i = 0; i < obj->nr_programs; i++) {
3490 struct bpf_program *prog = &obj->programs[i];
3491 struct btf_type *t;
3492 const char *name;
3493 int j, n;
3494
3495 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3496 continue;
3497
6a886de0
HC
3498 n = btf__type_cnt(obj->btf);
3499 for (j = 1; j < n; j++) {
aea28a60
AN
3500 t = btf_type_by_id(obj->btf, j);
3501 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3502 continue;
3503
3504 name = btf__str_by_offset(obj->btf, t->name_off);
3505 if (strcmp(name, prog->name) != 0)
3506 continue;
3507
3508 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3509 break;
3510 }
3511 }
3512
0f0e55d8
AN
3513 sanitize = btf_needs_sanitization(obj);
3514 if (sanitize) {
5c3320d7 3515 const void *raw_data;
0f0e55d8 3516 __u32 sz;
063183bf 3517
0f0e55d8 3518 /* clone BTF to sanitize a copy and leave the original intact */
6a886de0 3519 raw_data = btf__raw_data(obj->btf, &sz);
5c3320d7 3520 kern_btf = btf__new(raw_data, sz);
e9fc3ce9
AN
3521 err = libbpf_get_error(kern_btf);
3522 if (err)
3523 return err;
04efe591 3524
4c01925f
AN
3525 /* enforce 8-byte pointers for BPF-targeted BTFs */
3526 btf__set_pointer_size(obj->btf, 8);
f2a62588
YS
3527 err = bpf_object__sanitize_btf(obj, kern_btf);
3528 if (err)
3529 return err;
063183bf 3530 }
0f0e55d8 3531
67234743
AS
3532 if (obj->gen_loader) {
3533 __u32 raw_size = 0;
6a886de0 3534 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
67234743
AS
3535
3536 if (!raw_data)
3537 return -ENOMEM;
3538 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3539 /* Pretend to have a valid FD to pass various fd >= 0 checks.
3540 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3541 */
3542 btf__set_fd(kern_btf, 0);
3543 } else {
e0e3ea88
AN
3544 /* currently BPF_BTF_LOAD only supports log_level 1 */
3545 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
6b434b61 3546 obj->log_level ? 1 : 0, obj->token_fd);
67234743 3547 }
0f0e55d8
AN
3548 if (sanitize) {
3549 if (!err) {
3550 /* move fd to libbpf's BTF */
3551 btf__set_fd(obj->btf, btf__fd(kern_btf));
3552 btf__set_fd(kern_btf, -1);
3553 }
3554 btf__free(kern_btf);
3555 }
68b08647 3556report:
0f0e55d8
AN
3557 if (err) {
3558 btf_mandatory = kernel_needs_btf(obj);
3559 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3560 btf_mandatory ? "BTF is mandatory, can't proceed."
3561 : "BTF is optional, ignoring.");
3562 if (!btf_mandatory)
3563 err = 0;
3564 }
3565 return err;
063183bf
AN
3566}
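
/* A BPF-side sketch of the visibility trick described in the loop
 * above; bpf_helpers.h ships a __hidden convenience macro for exactly
 * this (subprog name hypothetical):
 *
 *	__hidden int my_subprog(int x)
 *	{
 *		return x;	-- its FUNC BTF is rewritten to BTF_FUNC_STATIC
 *	}
 */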
3567
88a82120
AN
3568static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3569{
3570 const char *name;
3571
3572 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3573 if (!name) {
3574 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3575 off, obj->path, elf_errmsg(-1));
3576 return NULL;
3577 }
3578
3579 return name;
3580}
3581
3582static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3583{
3584 const char *name;
3585
3586 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3587 if (!name) {
3588 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3589 off, obj->path, elf_errmsg(-1));
3590 return NULL;
3591 }
3592
3593 return name;
3594}
3595
3596static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3597{
3598 Elf_Scn *scn;
3599
3600 scn = elf_getscn(obj->efile.elf, idx);
3601 if (!scn) {
3602 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3603 idx, obj->path, elf_errmsg(-1));
3604 return NULL;
3605 }
3606 return scn;
3607}
3608
3609static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3610{
3611 Elf_Scn *scn = NULL;
3612 Elf *elf = obj->efile.elf;
3613 const char *sec_name;
3614
3615 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3616 sec_name = elf_sec_name(obj, scn);
3617 if (!sec_name)
3618 return NULL;
3619
3620 if (strcmp(sec_name, name) != 0)
3621 continue;
3622
3623 return scn;
3624 }
3625 return NULL;
3626}
3627
ad23b723 3628static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
88a82120 3629{
ad23b723
AN
3630 Elf64_Shdr *shdr;
3631
88a82120 3632 if (!scn)
ad23b723 3633 return NULL;
88a82120 3634
ad23b723
AN
3635 shdr = elf64_getshdr(scn);
3636 if (!shdr) {
88a82120
AN
3637 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3638 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
ad23b723 3639 return NULL;
88a82120
AN
3640 }
3641
ad23b723 3642 return shdr;
88a82120
AN
3643}
3644
3645static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3646{
3647 const char *name;
ad23b723 3648 Elf64_Shdr *sh;
88a82120
AN
3649
3650 if (!scn)
3651 return NULL;
3652
ad23b723
AN
3653 sh = elf_sec_hdr(obj, scn);
3654 if (!sh)
88a82120
AN
3655 return NULL;
3656
ad23b723 3657 name = elf_sec_str(obj, sh->sh_name);
88a82120
AN
3658 if (!name) {
3659 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3660 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3661 return NULL;
3662 }
3663
3664 return name;
3665}
3666
3667static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3668{
3669 Elf_Data *data;
3670
3671 if (!scn)
3672 return NULL;
3673
3674 data = elf_getdata(scn, 0);
3675 if (!data) {
3676 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3677 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3678 obj->path, elf_errmsg(-1));
3679 return NULL;
3680 }
3681
3682 return data;
3683}
3684
ad23b723
AN
3685static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3686{
3687 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3688 return NULL;
3689
3690 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3691}
3692
3693static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3694{
3695 if (idx >= data->d_size / sizeof(Elf64_Rel))
3696 return NULL;
3697
3698 return (Elf64_Rel *)data->d_buf + idx;
3699}
3700
50e09460
AN
3701static bool is_sec_name_dwarf(const char *name)
3702{
3703 /* approximation, but the actual list is too long */
13d35a0c 3704 return str_has_pfx(name, ".debug_");
50e09460
AN
3705}
3706
ad23b723 3707static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
50e09460
AN
3708{
3709 /* no special handling of .strtab */
3710 if (hdr->sh_type == SHT_STRTAB)
3711 return true;
3712
3713 /* ignore .llvm_addrsig section as well */
faf6ed32 3714 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
50e09460
AN
3715 return true;
3716
3717 /* no subprograms will lead to an empty .text section, ignore it */
3718 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3719 strcmp(name, ".text") == 0)
3720 return true;
3721
3722 /* DWARF sections */
3723 if (is_sec_name_dwarf(name))
3724 return true;
3725
13d35a0c 3726 if (str_has_pfx(name, ".rel")) {
50e09460
AN
3727 name += sizeof(".rel") - 1;
3728 /* DWARF section relocations */
3729 if (is_sec_name_dwarf(name))
3730 return true;
3731
3732 /* .BTF and .BTF.ext don't need relocations */
3733 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3734 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3735 return true;
3736 }
3737
3738 return false;
3739}
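
/* Illustration (not exhaustive) of what this filter drops vs. keeps:
 *
 *	dropped: ".strtab", ".llvm_addrsig", ".debug_info",
 *		 ".rel.debug_line", ".rel.BTF", an empty ".text"
 *	kept:    a non-empty ".text", ".data", ".maps", ".rel.text",
 *		 "license", ".BTF"
 */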
3740
db2b8b06
AN
3741static int cmp_progs(const void *_a, const void *_b)
3742{
3743 const struct bpf_program *a = _a;
3744 const struct bpf_program *b = _b;
3745
3746 if (a->sec_idx != b->sec_idx)
3747 return a->sec_idx < b->sec_idx ? -1 : 1;
3748
3749 /* sec_insn_off can't be the same within the section */
3750 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3751}
3752
0d13bfce 3753static int bpf_object__elf_collect(struct bpf_object *obj)
29603665 3754{
25bbbd7a 3755 struct elf_sec_desc *sec_desc;
29603665 3756 Elf *elf = obj->efile.elf;
f0187f0b 3757 Elf_Data *btf_ext_data = NULL;
1713d68b 3758 Elf_Data *btf_data = NULL;
666810e8 3759 int idx = 0, err = 0;
0201c575
AN
3760 const char *name;
3761 Elf_Data *data;
3762 Elf_Scn *scn;
ad23b723 3763 Elf64_Shdr *sh;
29603665 3764
0d6988e1 3765 /* ELF section indices are 0-based, but sec #0 is special "invalid"
51deedc9
SHY
3766 * section. Since section count retrieved by elf_getshdrnum() does
3767 * include sec #0, it is already the necessary size of an array to keep
3768 * all the sections.
25bbbd7a 3769 */
51deedc9
SHY
3770 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3771 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3772 obj->path, elf_errmsg(-1));
3773 return -LIBBPF_ERRNO__FORMAT;
3774 }
25bbbd7a
AN
3775 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3776 if (!obj->efile.secs)
3777 return -ENOMEM;
29603665 3778
0201c575
AN
3779 /* a bunch of ELF parsing functionality depends on processing symbols,
3780 * so do the first pass and find the symbol table
3781 */
3782 scn = NULL;
29603665 3783 while ((scn = elf_nextscn(elf, scn)) != NULL) {
ad23b723
AN
3784 sh = elf_sec_hdr(obj, scn);
3785 if (!sh)
0201c575
AN
3786 return -LIBBPF_ERRNO__FORMAT;
3787
ad23b723 3788 if (sh->sh_type == SHT_SYMTAB) {
0201c575
AN
3789 if (obj->efile.symbols) {
3790 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3791 return -LIBBPF_ERRNO__FORMAT;
3792 }
29603665 3793
0201c575
AN
3794 data = elf_sec_data(obj, scn);
3795 if (!data)
3796 return -LIBBPF_ERRNO__FORMAT;
3797
25bbbd7a
AN
3798 idx = elf_ndxscn(scn);
3799
0201c575 3800 obj->efile.symbols = data;
25bbbd7a 3801 obj->efile.symbols_shndx = idx;
ad23b723 3802 obj->efile.strtabidx = sh->sh_link;
0201c575
AN
3803 }
3804 }
3805
03e601f4
THJ
3806 if (!obj->efile.symbols) {
3807 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3808 obj->path);
3809 return -ENOENT;
3810 }
3811
0201c575
AN
3812 scn = NULL;
3813 while ((scn = elf_nextscn(elf, scn)) != NULL) {
25bbbd7a
AN
3814 idx = elf_ndxscn(scn);
3815 sec_desc = &obj->efile.secs[idx];
88a82120 3816
ad23b723
AN
3817 sh = elf_sec_hdr(obj, scn);
3818 if (!sh)
01b29d1d 3819 return -LIBBPF_ERRNO__FORMAT;
29603665 3820
ad23b723 3821 name = elf_sec_str(obj, sh->sh_name);
88a82120 3822 if (!name)
01b29d1d 3823 return -LIBBPF_ERRNO__FORMAT;
29603665 3824
ad23b723 3825 if (ignore_elf_section(sh, name))
50e09460
AN
3826 continue;
3827
88a82120
AN
3828 data = elf_sec_data(obj, scn);
3829 if (!data)
01b29d1d 3830 return -LIBBPF_ERRNO__FORMAT;
88a82120
AN
3831
3832 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
077c066a 3833 idx, name, (unsigned long)data->d_size,
ad23b723
AN
3834 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3835 (int)sh->sh_type);
cb1e5e96 3836
1713d68b 3837 if (strcmp(name, "license") == 0) {
88a82120 3838 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
01b29d1d
AN
3839 if (err)
3840 return err;
1713d68b 3841 } else if (strcmp(name, "version") == 0) {
88a82120 3842 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
54b8625c
JF
3843 if (err)
3844 return err;
1713d68b 3845 } else if (strcmp(name, "maps") == 0) {
e19db676
AN
3846 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3847 return -ENOTSUP;
abd29c93
AN
3848 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3849 obj->efile.btf_maps_shndx = idx;
1713d68b 3850 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
62554d52
AN
3851 if (sh->sh_type != SHT_PROGBITS)
3852 return -LIBBPF_ERRNO__FORMAT;
1713d68b 3853 btf_data = data;
2993e051 3854 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
62554d52
AN
3855 if (sh->sh_type != SHT_PROGBITS)
3856 return -LIBBPF_ERRNO__FORMAT;
f0187f0b 3857 btf_ext_data = data;
ad23b723 3858 } else if (sh->sh_type == SHT_SYMTAB) {
0201c575 3859 /* already processed during the first pass above */
ad23b723
AN
3860 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3861 if (sh->sh_flags & SHF_EXECINSTR) {
f8c7a4d4
JS
3862 if (strcmp(name, ".text") == 0)
3863 obj->efile.text_shndx = idx;
c1122392 3864 err = bpf_object__add_programs(obj, data, name, idx);
88a82120 3865 if (err)
01b29d1d 3866 return err;
aed65917
AN
3867 } else if (strcmp(name, DATA_SEC) == 0 ||
3868 str_has_pfx(name, DATA_SEC ".")) {
25bbbd7a
AN
3869 sec_desc->sec_type = SEC_DATA;
3870 sec_desc->shdr = sh;
3871 sec_desc->data = data;
aed65917
AN
3872 } else if (strcmp(name, RODATA_SEC) == 0 ||
3873 str_has_pfx(name, RODATA_SEC ".")) {
25bbbd7a
AN
3874 sec_desc->sec_type = SEC_RODATA;
3875 sec_desc->shdr = sh;
3876 sec_desc->data = data;
240bf8a5 3877 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
5ad0ecbe
EZ
3878 strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3879 strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3880 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
240bf8a5
EZ
3881 sec_desc->sec_type = SEC_ST_OPS;
3882 sec_desc->shdr = sh;
3883 sec_desc->data = data;
3884 obj->efile.has_st_ops = true;
2e7ba4f8
AN
3885 } else if (strcmp(name, ARENA_SEC) == 0) {
3886 obj->efile.arena_data = data;
3887 obj->efile.arena_data_shndx = idx;
d859900c 3888 } else {
50e09460
AN
3889 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3890 idx, name);
a5b8bd47 3891 }
ad23b723 3892 } else if (sh->sh_type == SHT_REL) {
25bbbd7a 3893 int targ_sec_idx = sh->sh_info; /* points to other section */
e3d91b0c 3894
b7332d28
AN
3895 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3896 targ_sec_idx >= obj->efile.sec_cnt)
3897 return -LIBBPF_ERRNO__FORMAT;
3898
e3d91b0c 3899 /* Only do relo for section with exec instructions */
25bbbd7a 3900 if (!section_have_execinstr(obj, targ_sec_idx) &&
646f02ff 3901 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
809a69d6 3902 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
5ad0ecbe
EZ
3903 strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3904 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
646f02ff 3905 strcmp(name, ".rel" MAPS_ELF_SEC)) {
50e09460 3906 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
25bbbd7a
AN
3907 idx, name, targ_sec_idx,
3908 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
e3d91b0c
JDB
3909 continue;
3910 }
b62f06e8 3911
25bbbd7a
AN
3912 sec_desc->sec_type = SEC_RELO;
3913 sec_desc->shdr = sh;
3914 sec_desc->data = data;
dc79f035
AS
3915 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3916 str_has_pfx(name, BSS_SEC "."))) {
25bbbd7a
AN
3917 sec_desc->sec_type = SEC_BSS;
3918 sec_desc->shdr = sh;
3919 sec_desc->data = data;
077c066a 3920 } else {
2e80be60 3921 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
ad23b723 3922 (size_t)sh->sh_size);
bec7d68c 3923 }
29603665 3924 }
561bbcca 3925
d3a3aa0c 3926 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
88a82120 3927 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
f102154d 3928 return -LIBBPF_ERRNO__FORMAT;
77ba9a5b 3929 }
db2b8b06
AN
3930
3931 /* sort BPF programs by section index and in-section instruction offset
e3ba8e4e
KM
3932 * for faster search
3933 */
2a6a9bf2
AN
3934 if (obj->nr_programs)
3935 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
db2b8b06 3936
0d13bfce 3937 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
29603665
WN
3938}
3939
ad23b723 3940static bool sym_is_extern(const Elf64_Sym *sym)
166750bc 3941{
ad23b723 3942 int bind = ELF64_ST_BIND(sym->st_info);
166750bc
AN
3943 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3944 return sym->st_shndx == SHN_UNDEF &&
3945 (bind == STB_GLOBAL || bind == STB_WEAK) &&
ad23b723 3946 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
166750bc
AN
3947}
3948
ad23b723 3949static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
53eddb5e 3950{
ad23b723
AN
3951 int bind = ELF64_ST_BIND(sym->st_info);
3952 int type = ELF64_ST_TYPE(sym->st_info);
53eddb5e
YS
3953
3954 /* in .text section */
3955 if (sym->st_shndx != text_shndx)
3956 return false;
3957
3958 /* local function */
3959 if (bind == STB_LOCAL && type == STT_SECTION)
3960 return true;
3961
3962 /* global function */
3963 return bind == STB_GLOBAL && type == STT_FUNC;
3964}
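
/* BPF-side sketch: declarations and how the two classifiers above see
 * their ELF symbols (__kconfig/__ksym come from bpf_helpers.h):
 *
 *	extern int CONFIG_HZ __kconfig;			-- sym_is_extern(): UND, NOTYPE
 *	extern void bpf_rcu_read_lock(void) __ksym;	-- also an extern
 *	static int helper(void) { return 0; }		-- sym_is_subprog() in .text
 */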
3965
166750bc
AN
3966static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3967{
3968 const struct btf_type *t;
5bd022ec 3969 const char *tname;
166750bc
AN
3970 int i, n;
3971
3972 if (!btf)
3973 return -ESRCH;
3974
6a886de0
HC
3975 n = btf__type_cnt(btf);
3976 for (i = 1; i < n; i++) {
166750bc
AN
3977 t = btf__type_by_id(btf, i);
3978
5bd022ec 3979 if (!btf_is_var(t) && !btf_is_func(t))
166750bc
AN
3980 continue;
3981
5bd022ec
MKL
3982 tname = btf__name_by_offset(btf, t->name_off);
3983 if (strcmp(tname, ext_name))
166750bc
AN
3984 continue;
3985
5bd022ec
MKL
3986 if (btf_is_var(t) &&
3987 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3988 return -EINVAL;
3989
3990 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
166750bc
AN
3991 return -EINVAL;
3992
3993 return i;
3994 }
3995
3996 return -ENOENT;
3997}
3998
2e33efe3
AN
3999 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
4000 const struct btf_var_secinfo *vs;
4001 const struct btf_type *t;
4002 int i, j, n;
4003
4004 if (!btf)
4005 return -ESRCH;
4006
6a886de0
HC
4007 n = btf__type_cnt(btf);
4008 for (i = 1; i < n; i++) {
2e33efe3
AN
4009 t = btf__type_by_id(btf, i);
4010
4011 if (!btf_is_datasec(t))
4012 continue;
4013
4014 vs = btf_var_secinfos(t);
4015 for (j = 0; j < btf_vlen(t); j++, vs++) {
4016 if (vs->type == ext_btf_id)
4017 return i;
4018 }
4019 }
4020
4021 return -ENOENT;
4022}
4023
4024static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
4025 bool *is_signed)
166750bc
AN
4026{
4027 const struct btf_type *t;
4028 const char *name;
4029
4030 t = skip_mods_and_typedefs(btf, id, NULL);
4031 name = btf__name_by_offset(btf, t->name_off);
4032
4033 if (is_signed)
4034 *is_signed = false;
4035 switch (btf_kind(t)) {
4036 case BTF_KIND_INT: {
4037 int enc = btf_int_encoding(t);
4038
4039 if (enc & BTF_INT_BOOL)
2e33efe3 4040 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
166750bc
AN
4041 if (is_signed)
4042 *is_signed = enc & BTF_INT_SIGNED;
4043 if (t->size == 1)
2e33efe3 4044 return KCFG_CHAR;
166750bc 4045 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
2e33efe3
AN
4046 return KCFG_UNKNOWN;
4047 return KCFG_INT;
166750bc
AN
4048 }
4049 case BTF_KIND_ENUM:
4050 if (t->size != 4)
2e33efe3 4051 return KCFG_UNKNOWN;
166750bc 4052 if (strcmp(name, "libbpf_tristate"))
2e33efe3
AN
4053 return KCFG_UNKNOWN;
4054 return KCFG_TRISTATE;
f2a62588
YS
4055 case BTF_KIND_ENUM64:
4056 if (strcmp(name, "libbpf_tristate"))
4057 return KCFG_UNKNOWN;
4058 return KCFG_TRISTATE;
166750bc
AN
4059 case BTF_KIND_ARRAY:
4060 if (btf_array(t)->nelems == 0)
2e33efe3
AN
4061 return KCFG_UNKNOWN;
4062 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4063 return KCFG_UNKNOWN;
4064 return KCFG_CHAR_ARR;
166750bc 4065 default:
2e33efe3 4066 return KCFG_UNKNOWN;
166750bc
AN
4067 }
4068}
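
/* A sketch of .kconfig externs and the kcfg_type each resolves to here
 * (config names purely illustrative):
 *
 *	extern int CONFIG_HZ __kconfig;				-- KCFG_INT
 *	extern _Bool CONFIG_BPF_SYSCALL __kconfig;		-- KCFG_BOOL
 *	extern char CONFIG_LOCALVERSION[64] __kconfig;		-- KCFG_CHAR_ARR
 *	extern enum libbpf_tristate CONFIG_MODULES __kconfig;	-- KCFG_TRISTATE
 */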
4069
4070static int cmp_externs(const void *_a, const void *_b)
4071{
4072 const struct extern_desc *a = _a;
4073 const struct extern_desc *b = _b;
4074
2e33efe3
AN
4075 if (a->type != b->type)
4076 return a->type < b->type ? -1 : 1;
4077
4078 if (a->type == EXT_KCFG) {
4079 /* descending order by alignment requirements */
4080 if (a->kcfg.align != b->kcfg.align)
4081 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4082 /* ascending order by size, within same alignment class */
4083 if (a->kcfg.sz != b->kcfg.sz)
4084 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4085 }
4086
166750bc
AN
4087 /* resolve ties by name */
4088 return strcmp(a->name, b->name);
4089}
4090
1c0c7074
AN
4091static int find_int_btf_id(const struct btf *btf)
4092{
4093 const struct btf_type *t;
4094 int i, n;
4095
6a886de0
HC
4096 n = btf__type_cnt(btf);
4097 for (i = 1; i < n; i++) {
1c0c7074
AN
4098 t = btf__type_by_id(btf, i);
4099
4100 if (btf_is_int(t) && btf_int_bits(t) == 32)
4101 return i;
4102 }
4103
4104 return 0;
4105}
4106
5bd022ec
MKL
4107static int add_dummy_ksym_var(struct btf *btf)
4108{
4109 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4110 const struct btf_var_secinfo *vs;
4111 const struct btf_type *sec;
4112
9683e577
IR
4113 if (!btf)
4114 return 0;
4115
5bd022ec
MKL
4116 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4117 BTF_KIND_DATASEC);
4118 if (sec_btf_id < 0)
4119 return 0;
4120
4121 sec = btf__type_by_id(btf, sec_btf_id);
4122 vs = btf_var_secinfos(sec);
4123 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4124 const struct btf_type *vt;
4125
4126 vt = btf__type_by_id(btf, vs->type);
4127 if (btf_is_func(vt))
4128 break;
4129 }
4130
4131 /* No func in ksyms sec. No need to add dummy var. */
4132 if (i == btf_vlen(sec))
4133 return 0;
4134
4135 int_btf_id = find_int_btf_id(btf);
4136 dummy_var_btf_id = btf__add_var(btf,
4137 "dummy_ksym",
4138 BTF_VAR_GLOBAL_ALLOCATED,
4139 int_btf_id);
4140 if (dummy_var_btf_id < 0)
4141 pr_warn("cannot create a dummy_ksym var\n");
4142
4143 return dummy_var_btf_id;
4144}
4145
166750bc
AN
4146static int bpf_object__collect_externs(struct bpf_object *obj)
4147{
1c0c7074 4148 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
166750bc
AN
4149 const struct btf_type *t;
4150 struct extern_desc *ext;
5bd022ec 4151 int i, n, off, dummy_var_btf_id;
2e33efe3 4152 const char *ext_name, *sec_name;
5964a223 4153 size_t ext_essent_len;
166750bc 4154 Elf_Scn *scn;
ad23b723 4155 Elf64_Shdr *sh;
166750bc
AN
4156
4157 if (!obj->efile.symbols)
4158 return 0;
4159
88a82120 4160 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
ad23b723 4161 sh = elf_sec_hdr(obj, scn);
83390787 4162 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
166750bc 4163 return -LIBBPF_ERRNO__FORMAT;
166750bc 4164
5bd022ec
MKL
4165 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4166 if (dummy_var_btf_id < 0)
4167 return dummy_var_btf_id;
4168
ad23b723 4169 n = sh->sh_size / sh->sh_entsize;
166750bc 4170 pr_debug("looking for externs among %d symbols...\n", n);
88a82120 4171
166750bc 4172 for (i = 0; i < n; i++) {
ad23b723 4173 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
166750bc 4174
ad23b723 4175 if (!sym)
166750bc 4176 return -LIBBPF_ERRNO__FORMAT;
ad23b723 4177 if (!sym_is_extern(sym))
166750bc 4178 continue;
ad23b723 4179 ext_name = elf_sym_str(obj, sym->st_name);
166750bc
AN
4180 if (!ext_name || !ext_name[0])
4181 continue;
4182
4183 ext = obj->externs;
029258d7 4184 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
166750bc
AN
4185 if (!ext)
4186 return -ENOMEM;
4187 obj->externs = ext;
4188 ext = &ext[obj->nr_extern];
4189 memset(ext, 0, sizeof(*ext));
4190 obj->nr_extern++;
4191
4192 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4193 if (ext->btf_id <= 0) {
4194 pr_warn("failed to find BTF for extern '%s': %d\n",
4195 ext_name, ext->btf_id);
4196 return ext->btf_id;
4197 }
4198 t = btf__type_by_id(obj->btf, ext->btf_id);
4199 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4200 ext->sym_idx = i;
ad23b723 4201 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
2e33efe3 4202
5964a223
DM
4203 ext_essent_len = bpf_core_essential_name_len(ext->name);
4204 ext->essent_name = NULL;
4205 if (ext_essent_len != strlen(ext->name)) {
4206 ext->essent_name = strndup(ext->name, ext_essent_len);
4207 if (!ext->essent_name)
4208 return -ENOMEM;
4209 }
4210
2e33efe3
AN
4211 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4212 if (ext->sec_btf_id <= 0) {
4213 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4214 ext_name, ext->btf_id, ext->sec_btf_id);
4215 return ext->sec_btf_id;
166750bc 4216 }
2e33efe3
AN
4217 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4218 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4219
4220 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
5bd022ec
MKL
4221 if (btf_is_func(t)) {
4222 pr_warn("extern function %s is unsupported under %s section\n",
4223 ext->name, KCONFIG_SEC);
4224 return -ENOTSUP;
4225 }
2e33efe3
AN
4226 kcfg_sec = sec;
4227 ext->type = EXT_KCFG;
4228 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4229 if (ext->kcfg.sz <= 0) {
4230 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4231 ext_name, ext->kcfg.sz);
4232 return ext->kcfg.sz;
4233 }
4234 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4235 if (ext->kcfg.align <= 0) {
4236 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4237 ext_name, ext->kcfg.align);
4238 return -EINVAL;
4239 }
4240 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
e3ba8e4e 4241 &ext->kcfg.is_signed);
2e33efe3 4242 if (ext->kcfg.type == KCFG_UNKNOWN) {
55d00c37 4243 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
2e33efe3
AN
4244 return -ENOTSUP;
4245 }
1c0c7074 4246 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
1c0c7074
AN
4247 ksym_sec = sec;
4248 ext->type = EXT_KSYM;
d370bbe1
HL
4249 skip_mods_and_typedefs(obj->btf, t->type,
4250 &ext->ksym.type_id);
2e33efe3
AN
4251 } else {
4252 pr_warn("unrecognized extern section '%s'\n", sec_name);
166750bc
AN
4253 return -ENOTSUP;
4254 }
4255 }
4256 pr_debug("collected %d externs total\n", obj->nr_extern);
4257
4258 if (!obj->nr_extern)
4259 return 0;
4260
2e33efe3 4261 /* sort externs by type, for kcfg ones also by (align, size, name) */
166750bc 4262 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
166750bc 4263
1c0c7074
AN
4264 /* for .ksyms section, we need to turn all externs into allocated
4265 * variables in BTF to pass kernel verification; we do this by
4266 * pretending that each extern is a 4-byte integer variable
4267 */
4268 if (ksym_sec) {
4269 /* find existing 4-byte integer type in BTF to use for fake
4270 * extern variables in DATASEC
4271 */
4272 int int_btf_id = find_int_btf_id(obj->btf);
5bd022ec
MKL
4273 /* For extern function, a dummy_var added earlier
4274 * will be used to replace the vs->type and
4275 * its name string will be used to refill
4276 * the missing param's name.
4277 */
4278 const struct btf_type *dummy_var;
1c0c7074 4279
5bd022ec 4280 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
1c0c7074
AN
4281 for (i = 0; i < obj->nr_extern; i++) {
4282 ext = &obj->externs[i];
4283 if (ext->type != EXT_KSYM)
4284 continue;
4285 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4286 i, ext->sym_idx, ext->name);
4287 }
4288
4289 sec = ksym_sec;
4290 n = btf_vlen(sec);
4291 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4292 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4293 struct btf_type *vt;
4294
4295 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4296 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4297 ext = find_extern_by_name(obj, ext_name);
4298 if (!ext) {
5bd022ec
MKL
4299 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4300 btf_kind_str(vt), ext_name);
1c0c7074
AN
4301 return -ESRCH;
4302 }
5bd022ec
MKL
4303 if (btf_is_func(vt)) {
4304 const struct btf_type *func_proto;
4305 struct btf_param *param;
4306 int j;
4307
4308 func_proto = btf__type_by_id(obj->btf,
4309 vt->type);
4310 param = btf_params(func_proto);
4311 /* Reuse the dummy_var string if the
4312 * func proto does not have param name.
4313 */
4314 for (j = 0; j < btf_vlen(func_proto); j++)
4315 if (param[j].type && !param[j].name_off)
4316 param[j].name_off =
4317 dummy_var->name_off;
4318 vs->type = dummy_var_btf_id;
4319 vt->info &= ~0xffff;
4320 vt->info |= BTF_FUNC_GLOBAL;
4321 } else {
4322 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4323 vt->type = int_btf_id;
4324 }
1c0c7074
AN
4325 vs->offset = off;
4326 vs->size = sizeof(int);
4327 }
4328 sec->size = off;
4329 }
4330
2e33efe3
AN
4331 if (kcfg_sec) {
4332 sec = kcfg_sec;
4333 /* for kcfg externs calculate their offsets within a .kconfig map */
4334 off = 0;
4335 for (i = 0; i < obj->nr_extern; i++) {
4336 ext = &obj->externs[i];
4337 if (ext->type != EXT_KCFG)
4338 continue;
166750bc 4339
2e33efe3
AN
4340 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4341 off = ext->kcfg.data_off + ext->kcfg.sz;
1c0c7074 4342 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
2e33efe3
AN
4343 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4344 }
4345 sec->size = off;
4346 n = btf_vlen(sec);
4347 for (i = 0; i < n; i++) {
4348 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4349
4350 t = btf__type_by_id(obj->btf, vs->type);
4351 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4352 ext = find_extern_by_name(obj, ext_name);
4353 if (!ext) {
4354 pr_warn("failed to find extern definition for BTF var '%s'\n",
4355 ext_name);
4356 return -ESRCH;
4357 }
4358 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4359 vs->offset = ext->kcfg.data_off;
166750bc 4360 }
166750bc 4361 }
166750bc
AN
4362 return 0;
4363}
4364
bd054102
AN
4365static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4366{
197afc63 4367 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
c3c55696
AN
4368}
4369
01af3bf0
AN
4370struct bpf_program *
4371bpf_object__find_program_by_name(const struct bpf_object *obj,
4372 const char *name)
4373{
4374 struct bpf_program *prog;
4375
4376 bpf_object__for_each_program(prog, obj) {
c3c55696
AN
4377 if (prog_is_subprog(obj, prog))
4378 continue;
01af3bf0
AN
4379 if (!strcmp(prog->name, name))
4380 return prog;
4381 }
e9fc3ce9 4382 return errno = ENOENT, NULL;
01af3bf0
AN
4383}
4384
d859900c
DB
4385static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4386 int shndx)
4387{
25bbbd7a
AN
4388 switch (obj->efile.secs[shndx].sec_type) {
4389 case SEC_BSS:
4390 case SEC_DATA:
4391 case SEC_RODATA:
4392 return true;
4393 default:
4394 return false;
4395 }
d859900c
DB
4396}
4397
4398static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4399 int shndx)
4400{
e19db676 4401 return shndx == obj->efile.btf_maps_shndx;
d859900c
DB
4402}
4403
d859900c
DB
4404static enum libbpf_map_type
4405bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4406{
25bbbd7a
AN
4407 if (shndx == obj->efile.symbols_shndx)
4408 return LIBBPF_MAP_KCONFIG;
4409
4410 switch (obj->efile.secs[shndx].sec_type) {
4411 case SEC_BSS:
d859900c 4412 return LIBBPF_MAP_BSS;
25bbbd7a
AN
4413 case SEC_DATA:
4414 return LIBBPF_MAP_DATA;
4415 case SEC_RODATA:
d859900c 4416 return LIBBPF_MAP_RODATA;
25bbbd7a 4417 default:
d859900c 4418 return LIBBPF_MAP_UNSPEC;
25bbbd7a 4419 }
d859900c
DB
4420}
4421
1f8e2bcb
AN
4422static int bpf_program__record_reloc(struct bpf_program *prog,
4423 struct reloc_desc *reloc_desc,
9c0f8cbd 4424 __u32 insn_idx, const char *sym_name,
ad23b723 4425 const Elf64_Sym *sym, const Elf64_Rel *rel)
1f8e2bcb
AN
4426{
4427 struct bpf_insn *insn = &prog->insns[insn_idx];
4428 size_t map_idx, nr_maps = prog->obj->nr_maps;
4429 struct bpf_object *obj = prog->obj;
4430 __u32 shdr_idx = sym->st_shndx;
4431 enum libbpf_map_type type;
9c0f8cbd 4432 const char *sym_sec_name;
1f8e2bcb
AN
4433 struct bpf_map *map;
4434
aa0b8d43 4435 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
9c0f8cbd
AN
4436 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4437 prog->name, sym_name, insn_idx, insn->code);
1f8e2bcb
AN
4438 return -LIBBPF_ERRNO__RELOC;
4439 }
166750bc
AN
4440
4441 if (sym_is_extern(sym)) {
ad23b723 4442 int sym_idx = ELF64_R_SYM(rel->r_info);
166750bc
AN
4443 int i, n = obj->nr_extern;
4444 struct extern_desc *ext;
4445
4446 for (i = 0; i < n; i++) {
4447 ext = &obj->externs[i];
4448 if (ext->sym_idx == sym_idx)
4449 break;
4450 }
4451 if (i >= n) {
9c0f8cbd
AN
4452 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4453 prog->name, sym_name, sym_idx);
166750bc
AN
4454 return -LIBBPF_ERRNO__RELOC;
4455 }
9c0f8cbd
AN
4456 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4457 prog->name, i, ext->name, ext->sym_idx, insn_idx);
5bd022ec 4458 if (insn->code == (BPF_JMP | BPF_CALL))
a18f7214 4459 reloc_desc->type = RELO_EXTERN_CALL;
5bd022ec 4460 else
a18f7214 4461 reloc_desc->type = RELO_EXTERN_LD64;
166750bc 4462 reloc_desc->insn_idx = insn_idx;
3055ddd6 4463 reloc_desc->ext_idx = i;
166750bc
AN
4464 return 0;
4465 }
4466
aa0b8d43
MKL
4467 /* sub-program call relocation */
4468 if (is_call_insn(insn)) {
4469 if (insn->src_reg != BPF_PSEUDO_CALL) {
4470 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4471 return -LIBBPF_ERRNO__RELOC;
4472 }
4473 /* text_shndx can be 0, if no default "main" program exists */
4474 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4475 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4476 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4477 prog->name, sym_name, sym_sec_name);
4478 return -LIBBPF_ERRNO__RELOC;
4479 }
4480 if (sym->st_value % BPF_INSN_SZ) {
4481 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4482 prog->name, sym_name, (size_t)sym->st_value);
4483 return -LIBBPF_ERRNO__RELOC;
4484 }
4485 reloc_desc->type = RELO_CALL;
4486 reloc_desc->insn_idx = insn_idx;
4487 reloc_desc->sym_off = sym->st_value;
4488 return 0;
4489 }
4490
1f8e2bcb 4491 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
9c0f8cbd
AN
4492 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4493 prog->name, sym_name, shdr_idx);
1f8e2bcb
AN
4494 return -LIBBPF_ERRNO__RELOC;
4495 }
4496
53eddb5e
YS
4497 /* loading subprog addresses */
4498 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4499 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4500 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4501 */
4502 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4503 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4504 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4505 return -LIBBPF_ERRNO__RELOC;
4506 }
4507
4508 reloc_desc->type = RELO_SUBPROG_ADDR;
4509 reloc_desc->insn_idx = insn_idx;
4510 reloc_desc->sym_off = sym->st_value;
4511 return 0;
4512 }
4513
1f8e2bcb 4514 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
9c0f8cbd 4515 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
1f8e2bcb 4516
2e7ba4f8
AN
4517 /* arena data relocation */
4518 if (shdr_idx == obj->efile.arena_data_shndx) {
4519 reloc_desc->type = RELO_DATA;
4520 reloc_desc->insn_idx = insn_idx;
4521 reloc_desc->map_idx = obj->arena_map - obj->maps;
4522 reloc_desc->sym_off = sym->st_value;
4523 return 0;
4524 }
4525
1f8e2bcb
AN
4526 /* generic map reference relocation */
4527 if (type == LIBBPF_MAP_UNSPEC) {
4528 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
9c0f8cbd
AN
4529 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4530 prog->name, sym_name, sym_sec_name);
1f8e2bcb
AN
4531 return -LIBBPF_ERRNO__RELOC;
4532 }
4533 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4534 map = &obj->maps[map_idx];
4535 if (map->libbpf_type != type ||
4536 map->sec_idx != sym->st_shndx ||
4537 map->sec_offset != sym->st_value)
4538 continue;
9c0f8cbd
AN
4539 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4540 prog->name, map_idx, map->name, map->sec_idx,
1f8e2bcb
AN
4541 map->sec_offset, insn_idx);
4542 break;
4543 }
4544 if (map_idx >= nr_maps) {
9c0f8cbd
AN
4545 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4546 prog->name, sym_sec_name, (size_t)sym->st_value);
1f8e2bcb
AN
4547 return -LIBBPF_ERRNO__RELOC;
4548 }
4549 reloc_desc->type = RELO_LD64;
4550 reloc_desc->insn_idx = insn_idx;
4551 reloc_desc->map_idx = map_idx;
53f8dd43 4552 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
1f8e2bcb
AN
4553 return 0;
4554 }
4555
4556 /* global data map relocation */
4557 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
9c0f8cbd
AN
4558 pr_warn("prog '%s': bad data relo against section '%s'\n",
4559 prog->name, sym_sec_name);
1f8e2bcb 4560 return -LIBBPF_ERRNO__RELOC;
1f8e2bcb 4561 }
1f8e2bcb
AN
4562 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4563 map = &obj->maps[map_idx];
25bbbd7a 4564 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
1f8e2bcb 4565 continue;
9c0f8cbd
AN
4566 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4567 prog->name, map_idx, map->name, map->sec_idx,
4568 map->sec_offset, insn_idx);
1f8e2bcb
AN
4569 break;
4570 }
4571 if (map_idx >= nr_maps) {
9c0f8cbd
AN
4572 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4573 prog->name, sym_sec_name);
1f8e2bcb
AN
4574 return -LIBBPF_ERRNO__RELOC;
4575 }
4576
4577 reloc_desc->type = RELO_DATA;
4578 reloc_desc->insn_idx = insn_idx;
4579 reloc_desc->map_idx = map_idx;
53f8dd43 4580 reloc_desc->sym_off = sym->st_value;
1f8e2bcb
AN
4581 return 0;
4582}
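
/* A rough sketch of which BPF-side constructs end up as which
 * reloc_desc type in the function above:
 *
 *	bpf_map_lookup_elem(&my_map, &key)	-- RELO_LD64 (ldimm64 vs .maps)
 *	&some_global_var			-- RELO_DATA (ldimm64 vs .data/.bss/.rodata)
 *	my_subprog(ctx)				-- RELO_CALL
 *	&my_subprog				-- RELO_SUBPROG_ADDR
 *	extern ... __ksym / __kconfig		-- RELO_EXTERN_CALL / RELO_EXTERN_LD64
 */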
4583
db2b8b06
AN
4584static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4585{
4586 return insn_idx >= prog->sec_insn_off &&
4587 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4588}
4589
4590static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4591 size_t sec_idx, size_t insn_idx)
4592{
4593 int l = 0, r = obj->nr_programs - 1, m;
4594 struct bpf_program *prog;
4595
d0d382f9
SHY
4596 if (!obj->nr_programs)
4597 return NULL;
4598
db2b8b06
AN
4599 while (l < r) {
4600 m = l + (r - l + 1) / 2;
4601 prog = &obj->programs[m];
4602
4603 if (prog->sec_idx < sec_idx ||
4604 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4605 l = m;
4606 else
4607 r = m - 1;
4608 }
4609 /* matching program could be at index l, but it still might be the
4610 * wrong one, so we need to double-check conditions one last time
4611 */
4612 prog = &obj->programs[l];
4613 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4614 return prog;
4615 return NULL;
4616}
4617
34090915 4618static int
ad23b723 4619bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
34090915 4620{
9c0f8cbd 4621 const char *relo_sec_name, *sec_name;
b7332d28 4622 size_t sec_idx = shdr->sh_info, sym_idx;
c3c55696
AN
4623 struct bpf_program *prog;
4624 struct reloc_desc *relos;
1f8e2bcb 4625 int err, i, nrels;
c3c55696
AN
4626 const char *sym_name;
4627 __u32 insn_idx;
6245947c
AN
4628 Elf_Scn *scn;
4629 Elf_Data *scn_data;
ad23b723
AN
4630 Elf64_Sym *sym;
4631 Elf64_Rel *rel;
34090915 4632
b7332d28
AN
4633 if (sec_idx >= obj->efile.sec_cnt)
4634 return -EINVAL;
4635
6245947c
AN
4636 scn = elf_sec_by_idx(obj, sec_idx);
4637 scn_data = elf_sec_data(obj, scn);
fc3a5534
MZ
4638 if (!scn_data)
4639 return -LIBBPF_ERRNO__FORMAT;
6245947c 4640
9c0f8cbd 4641 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
6245947c 4642 sec_name = elf_sec_name(obj, scn);
9c0f8cbd
AN
4643 if (!relo_sec_name || !sec_name)
4644 return -EINVAL;
4645
4646 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4647 relo_sec_name, sec_idx, sec_name);
34090915
WN
4648 nrels = shdr->sh_size / shdr->sh_entsize;
4649
34090915 4650 for (i = 0; i < nrels; i++) {
ad23b723
AN
4651 rel = elf_rel_by_idx(data, i);
4652 if (!rel) {
9c0f8cbd 4653 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
6371ca3b 4654 return -LIBBPF_ERRNO__FORMAT;
34090915 4655 }
ad23b723 4656
b7332d28
AN
4657 sym_idx = ELF64_R_SYM(rel->r_info);
4658 sym = elf_sym_by_idx(obj, sym_idx);
ad23b723 4659 if (!sym) {
b7332d28
AN
4660 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4661 relo_sec_name, sym_idx, i);
4662 return -LIBBPF_ERRNO__FORMAT;
4663 }
4664
4665 if (sym->st_shndx >= obj->efile.sec_cnt) {
4666 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4667 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
6371ca3b 4668 return -LIBBPF_ERRNO__FORMAT;
34090915 4669 }
6245947c 4670
ad23b723 4671 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
9c0f8cbd 4672 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
b7332d28 4673 relo_sec_name, (size_t)rel->r_offset, i);
1f8e2bcb 4674 return -LIBBPF_ERRNO__FORMAT;
9c0f8cbd 4675 }
d859900c 4676
ad23b723 4677 insn_idx = rel->r_offset / BPF_INSN_SZ;
c3c55696
AN
4678 /* relocations against static functions are recorded as
4679 * relocations against the section that contains a function;
4680 * in such case, symbol will be STT_SECTION and sym.st_name
4681 * will point to empty string (0), so fetch section name
4682 * instead
4683 */
ad23b723
AN
4684 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4685 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
c3c55696 4686 else
ad23b723 4687 sym_name = elf_sym_str(obj, sym->st_name);
c3c55696 4688 sym_name = sym_name ?: "<?>";
d859900c 4689
9c0f8cbd
AN
4690 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4691 relo_sec_name, i, insn_idx, sym_name);
666810e8 4692
c3c55696
AN
4693 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4694 if (!prog) {
6245947c 4695 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
c3c55696 4696 relo_sec_name, i, sec_name, insn_idx);
6245947c 4697 continue;
c3c55696
AN
4698 }
4699
4700 relos = libbpf_reallocarray(prog->reloc_desc,
4701 prog->nr_reloc + 1, sizeof(*relos));
4702 if (!relos)
4703 return -ENOMEM;
4704 prog->reloc_desc = relos;
4705
4706 /* adjust insn_idx to local BPF program frame of reference */
4707 insn_idx -= prog->sec_insn_off;
4708 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
ad23b723 4709 insn_idx, sym_name, sym, rel);
1f8e2bcb
AN
4710 if (err)
4711 return err;
c3c55696
AN
4712
4713 prog->nr_reloc++;
34090915
WN
4714 }
4715 return 0;
4716}
4717
4fcac46c 4718static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
8a138aed 4719{
aaf6886d 4720 int id;
8a138aed 4721
a8fee962
AN
4722 if (!obj->btf)
4723 return -ENOENT;
4724
590a0088
MKL
4725 /* if it's BTF-defined map, we don't need to search for type IDs.
4726 * For struct_ops map, it does not need btf_key_type_id and
4727 * btf_value_type_id.
4728 */
aaf6886d 4729 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
abd29c93
AN
4730 return 0;
4731
aaf6886d
AN
4732 /*
4733 * LLVM annotates global data differently in BTF, that is,
4734 * only as '.data', '.bss' or '.rodata'.
4735 */
4736 if (!bpf_map__is_internal(map))
4737 return -ENOENT;
4738
4739 id = btf__find_by_name(obj->btf, map->real_name);
4740 if (id < 0)
4741 return id;
8a138aed 4742
aaf6886d
AN
4743 map->btf_key_type_id = 0;
4744 map->btf_value_type_id = id;
8a138aed
MKL
4745 return 0;
4746}
4747
97eb3138
MP
4748static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4749{
4750 char file[PATH_MAX], buff[4096];
4751 FILE *fp;
4752 __u32 val;
4753 int err;
4754
4755 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4756 memset(info, 0, sizeof(*info));
4757
59842c54 4758 fp = fopen(file, "re");
97eb3138
MP
4759 if (!fp) {
4760 err = -errno;
4761 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4762 err);
4763 return err;
4764 }
4765
4766 while (fgets(buff, sizeof(buff), fp)) {
4767 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4768 info->type = val;
4769 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4770 info->key_size = val;
4771 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4772 info->value_size = val;
4773 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4774 info->max_entries = val;
4775 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4776 info->map_flags = val;
4777 }
4778
4779 fclose(fp);
4780
4781 return 0;
4782}
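
/* An abridged example of the /proc/<pid>/fdinfo/<fd> payload this
 * fallback parses (values illustrative):
 *
 *	map_type:	2
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 */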
4783
ec41817b
AN
4784bool bpf_map__autocreate(const struct bpf_map *map)
4785{
4786 return map->autocreate;
4787}
4788
4789int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4790{
4791 if (map->obj->loaded)
4792 return libbpf_err(-EBUSY);
4793
4794 map->autocreate = autocreate;
4795 return 0;
4796}
4797
26736eb9
JK
4798int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4799{
813847a3 4800 struct bpf_map_info info;
bf3f0037 4801 __u32 len = sizeof(info), name_len;
26736eb9
JK
4802 int new_fd, err;
4803 char *new_name;
4804
813847a3 4805 memset(&info, 0, len);
629dfc66 4806 err = bpf_map_get_info_by_fd(fd, &info, &len);
97eb3138
MP
4807 if (err && errno == EINVAL)
4808 err = bpf_get_map_info_from_fdinfo(fd, &info);
26736eb9 4809 if (err)
e9fc3ce9 4810 return libbpf_err(err);
26736eb9 4811
bf3f0037
AW
4812 name_len = strlen(info.name);
4813 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4814 new_name = strdup(map->name);
4815 else
4816 new_name = strdup(info.name);
4817
26736eb9 4818 if (!new_name)
e9fc3ce9 4819 return libbpf_err(-errno);
26736eb9 4820
4aadd292
AN
4821 /*
4822 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
4823 * This is similar to what we do in ensure_good_fd(), but without
4824 * closing original FD.
4825 */
4826 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
d1b4574a
THJ
4827 if (new_fd < 0) {
4828 err = -errno;
26736eb9 4829 goto err_free_new_name;
d1b4574a 4830 }
26736eb9 4831
dac645b9
AN
4832 err = reuse_fd(map->fd, new_fd);
4833 if (err)
4834 goto err_free_new_name;
4835
26736eb9
JK
4836 free(map->name);
4837
26736eb9
JK
4838 map->name = new_name;
4839 map->def.type = info.type;
4840 map->def.key_size = info.key_size;
4841 map->def.value_size = info.value_size;
4842 map->def.max_entries = info.max_entries;
4843 map->def.map_flags = info.map_flags;
4844 map->btf_key_type_id = info.btf_key_type_id;
4845 map->btf_value_type_id = info.btf_value_type_id;
ec6d5f47 4846 map->reused = true;
47512102 4847 map->map_extra = info.map_extra;
26736eb9
JK
4848
4849 return 0;
4850
26736eb9
JK
4851err_free_new_name:
4852 free(new_name);
e9fc3ce9 4853 return libbpf_err(err);
26736eb9
JK
4854}
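
/* A caller-side usage sketch (path and map name hypothetical): adopt a
 * pinned map's FD before bpf_object__load() so the map is shared rather
 * than re-created.
 */
static int example_reuse_pinned_map(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
	int pin_fd, err;

	if (!map)
		return -ENOENT;
	pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
	if (pin_fd < 0)
		return -errno;
	err = bpf_map__reuse_fd(map, pin_fd);	/* dup()s the FD internally */
	close(pin_fd);				/* our reference is no longer needed */
	return err;
}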
4855
1bdb6c9a 4856__u32 bpf_map__max_entries(const struct bpf_map *map)
1a11a4c7 4857{
1bdb6c9a
AN
4858 return map->def.max_entries;
4859}
1a11a4c7 4860
b3278099
AN
4861struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4862{
4863 if (!bpf_map_type__is_map_in_map(map->def.type))
e9fc3ce9 4864 return errno = EINVAL, NULL;
b3278099
AN
4865
4866 return map->inner_map;
4867}
4868
1bdb6c9a
AN
4869int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4870{
597fbc46 4871 if (map->obj->loaded)
e9fc3ce9 4872 return libbpf_err(-EBUSY);
597fbc46 4873
1a11a4c7 4874 map->def.max_entries = max_entries;
597fbc46
AN
4875
4876 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
b66ccae0 4877 if (map_is_ringbuf(map))
597fbc46
AN
4878 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4879
1a11a4c7
AI
4880 return 0;
4881}
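
/* Usage sketch (map name hypothetical): resize a BPF ringbuf before
 * load; per the setter above, ringbuf max_entries is auto-adjusted to a
 * multiple of the page size.
 */
static int example_resize_ringbuf(struct bpf_object *obj)
{
	struct bpf_map *rb = bpf_object__find_map_by_name(obj, "events");

	if (!rb)
		return -ENOENT;
	return bpf_map__set_max_entries(rb, 512 * 1024);
}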
4882
6b434b61
AN
4883static int bpf_object_prepare_token(struct bpf_object *obj)
4884{
4885 const char *bpffs_path;
4886 int bpffs_fd = -1, token_fd, err;
4887 bool mandatory;
4888 enum libbpf_print_level level;
4889
4890 /* token is explicitly prevented */
4891 if (obj->token_path && obj->token_path[0] == '\0') {
4892 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4893 return 0;
4894 }
4895
4896 mandatory = obj->token_path != NULL;
4897 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4898
4899 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4900 bpffs_fd = open(bpffs_path, O_DIRECTORY);
4901 if (bpffs_fd < 0) {
4902 err = -errno;
4903 __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4904 obj->name, err, bpffs_path,
4905 mandatory ? "" : ", skipping optional step...");
4906 return mandatory ? err : 0;
4907 }
4908
4909 token_fd = bpf_token_create(bpffs_fd, 0);
4910 close(bpffs_fd);
4911 if (token_fd < 0) {
4912 if (!mandatory && token_fd == -ENOENT) {
4913 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4914 obj->name, bpffs_path);
4915 return 0;
4916 }
4917 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4918 obj->name, token_fd, bpffs_path,
4919 mandatory ? "" : ", skipping optional step...");
4920 return mandatory ? token_fd : 0;
4921 }
4922
4923 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4924 if (!obj->feat_cache) {
4925 close(token_fd);
4926 return -ENOMEM;
4927 }
4928
4929 obj->token_fd = token_fd;
4930 obj->feat_cache->token_fd = token_fd;
4931
4932 return 0;
4933}
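
/* A caller-side sketch (object path hypothetical): explicitly request a
 * BPF token from a delegated bpffs mount via open opts; setting
 * bpf_token_path to "" would prevent token usage altogether, matching
 * the check at the top of this function.
 */
static struct bpf_object *example_open_with_token(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.bpf_token_path = "/sys/fs/bpf",
	);

	return bpf_object__open_file("prog.bpf.o", &opts);
}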
4934
47eff617 4935static int
fd9eef1a 4936bpf_object__probe_loading(struct bpf_object *obj)
47eff617 4937{
47eff617
SF
4938 char *cp, errmsg[STRERR_BUFSIZE];
4939 struct bpf_insn insns[] = {
4940 BPF_MOV64_IMM(BPF_REG_0, 0),
4941 BPF_EXIT_INSN(),
4942 };
e32660ac 4943 int ret, insn_cnt = ARRAY_SIZE(insns);
6b434b61
AN
4944 LIBBPF_OPTS(bpf_prog_load_opts, opts,
4945 .token_fd = obj->token_fd,
4946 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4947 );
47eff617 4948
f9bceaa5
SF
4949 if (obj->gen_loader)
4950 return 0;
4951
e542f2c4
AN
4952 ret = bump_rlimit_memlock();
4953 if (ret)
4954 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4955
47eff617 4956 /* make sure basic loading works */
6b434b61 4957 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
e32660ac 4958 if (ret < 0)
6b434b61 4959 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
47eff617 4960 if (ret < 0) {
fd9eef1a
EC
4961 ret = errno;
4962 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4963 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4964 "program. Make sure your kernel supports BPF "
4965 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4966 "set to big enough value.\n", __func__, cp, ret);
4967 return -ret;
47eff617
SF
4968 }
4969 close(ret);
4970
fd9eef1a
EC
4971 return 0;
4972}
4973
d6dd1d49
AN
4974bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4975{
8263b338 4976 if (obj->gen_loader)
d6dd1d49
AN
4977 /* To generate loader program assume the latest kernel
4978 * to avoid doing extra prog_load, map_create syscalls.
4979 */
4980 return true;
4981
6b434b61
AN
4982 if (obj->token_fd)
4983 return feat_supported(obj->feat_cache, feat_id);
4984
d6dd1d49
AN
4985 return feat_supported(NULL, feat_id);
4986}
4987
57a00f41
THJ
4988static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4989{
813847a3 4990 struct bpf_map_info map_info;
57a00f41 4991 char msg[STRERR_BUFSIZE];
813847a3 4992 __u32 map_info_len = sizeof(map_info);
97eb3138 4993 int err;
57a00f41 4994
813847a3 4995 memset(&map_info, 0, map_info_len);
629dfc66 4996 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
97eb3138
MP
4997 if (err && errno == EINVAL)
4998 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4999 if (err) {
5000 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
5001 libbpf_strerror_r(errno, msg, sizeof(msg)));
57a00f41
THJ
5002 return false;
5003 }
5004
5005 return (map_info.type == map->def.type &&
5006 map_info.key_size == map->def.key_size &&
5007 map_info.value_size == map->def.value_size &&
5008 map_info.max_entries == map->def.max_entries &&
47512102
JK
5009 map_info.map_flags == map->def.map_flags &&
5010 map_info.map_extra == map->map_extra);
57a00f41
THJ
5011}
5012

static int
bpf_object__reuse_map(struct bpf_map *map)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, pin_fd;

	pin_fd = bpf_obj_get(map->pin_path);
	if (pin_fd < 0) {
		err = -errno;
		if (err == -ENOENT) {
			pr_debug("found no pinned map to reuse at '%s'\n",
				 map->pin_path);
			return 0;
		}

		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("couldn't retrieve pinned map '%s': %s\n",
			map->pin_path, cp);
		return err;
	}

	if (!map_is_reuse_compat(map, pin_fd)) {
		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
			map->pin_path);
		close(pin_fd);
		return -EINVAL;
	}

	err = bpf_map__reuse_fd(map, pin_fd);
	close(pin_fd);
	if (err)
		return err;

	map->pinned = true;
	pr_debug("reused pinned map at '%s'\n", map->pin_path);

	return 0;
}

static int
bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
{
	enum libbpf_map_type map_type = map->libbpf_type;
	char *cp, errmsg[STRERR_BUFSIZE];
	int err, zero = 0;

	if (obj->gen_loader) {
		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
					 map->mmaped, map->def.value_size);
		if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
			bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
		return 0;
	}

	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
	if (err) {
		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error setting initial map(%s) contents: %s\n",
			map->name, cp);
		return err;
	}

	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
		err = bpf_map_freeze(map->fd);
		if (err) {
			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("Error freezing map(%s) as read-only: %s\n",
				map->name, cp);
			return err;
		}
	}
	return 0;
}

static void bpf_map__destroy(struct bpf_map *map);

static bool map_is_created(const struct bpf_map *map)
{
	return map->obj->loaded || map->reused;
}

static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
	struct bpf_map_def *def = &map->def;
	const char *map_name = NULL;
	int err = 0, map_fd;

	if (kernel_supports(obj, FEAT_PROG_NAME))
		map_name = map->name;
	create_attr.map_ifindex = map->map_ifindex;
	create_attr.map_flags = def->map_flags;
	create_attr.numa_node = map->numa_node;
	create_attr.map_extra = map->map_extra;
	create_attr.token_fd = obj->token_fd;
	if (obj->token_fd)
		create_attr.map_flags |= BPF_F_TOKEN_FD;

	if (bpf_map__is_struct_ops(map)) {
		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
		if (map->mod_btf_fd >= 0) {
			create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
			create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
		}
	}

	if (obj->btf && btf__fd(obj->btf) >= 0) {
		create_attr.btf_fd = btf__fd(obj->btf);
		create_attr.btf_key_type_id = map->btf_key_type_id;
		create_attr.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_type__is_map_in_map(def->type)) {
		if (map->inner_map) {
			err = map_set_def_max_entries(map->inner_map);
			if (err)
				return err;
			err = bpf_object__create_map(obj, map->inner_map, true);
			if (err) {
				pr_warn("map '%s': failed to create inner map: %d\n",
					map->name, err);
				return err;
			}
			map->inner_map_fd = map->inner_map->fd;
		}
		if (map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;
	}

	switch (def->type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_ARENA:
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		break;
	case BPF_MAP_TYPE_STRUCT_OPS:
		create_attr.btf_value_type_id = 0;
		break;
	default:
		break;
	}

	if (obj->gen_loader) {
		bpf_gen__map_create(obj->gen_loader, def->type, map_name,
				    def->key_size, def->value_size, def->max_entries,
				    &create_attr, is_inner ? -1 : map - obj->maps);
		/* We keep pretending we have a valid FD to pass various fd >= 0
		 * checks by just keeping original placeholder FDs in place.
		 * See bpf_object__add_map() comment.
		 * This placeholder fd will not be used with any syscall and
		 * will be reset to -1 eventually.
		 */
		map_fd = map->fd;
	} else {
		map_fd = bpf_map_create(def->type, map_name,
					def->key_size, def->value_size,
					def->max_entries, &create_attr);
	}
	if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
		char *cp, errmsg[STRERR_BUFSIZE];

		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
			map->name, cp, err);
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		map_fd = bpf_map_create(def->type, map_name,
					def->key_size, def->value_size,
					def->max_entries, &create_attr);
	}

	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
		if (obj->gen_loader)
			map->inner_map->fd = -1;
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	if (map_fd < 0)
		return map_fd;

	/* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
	if (map->fd == map_fd)
		return 0;

	/* Keep placeholder FD value but now point it to the BPF map object.
	 * This way everything that relied on this map's FD (e.g., relocated
	 * ldimm64 instructions) will stay valid and won't need adjustments.
	 * map->fd stays valid but now points to what map_fd points to.
	 */
	return reuse_fd(map->fd, map_fd);
}
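
/* Illustrative sketch, not part of libbpf: the low-level counterpart of the
 * logic above is the public bpf_map_create() API, which wraps the
 * BPF_MAP_CREATE command; map name and sizes below are assumptions:
 */
#if 0 /* usage sketch */
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
			sizeof(__u32) /* key */, sizeof(__u64) /* value */,
			1024 /* max_entries */, &opts);
#endif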

static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
{
	const struct bpf_map *targ_map;
	unsigned int i;
	int fd, err = 0;

	for (i = 0; i < map->init_slots_sz; i++) {
		if (!map->init_slots[i])
			continue;

		targ_map = map->init_slots[i];
		fd = targ_map->fd;

		if (obj->gen_loader) {
			bpf_gen__populate_outer_map(obj->gen_loader,
						    map - obj->maps, i,
						    targ_map - obj->maps);
		} else {
			err = bpf_map_update_elem(map->fd, &i, &fd, 0);
		}
		if (err) {
			err = -errno;
			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
				map->name, i, targ_map->name, fd, err);
			return err;
		}
		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
			 map->name, i, targ_map->name, fd);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	return 0;
}

static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
{
	const struct bpf_program *targ_prog;
	unsigned int i;
	int fd, err;

	if (obj->gen_loader)
		return -ENOTSUP;

	for (i = 0; i < map->init_slots_sz; i++) {
		if (!map->init_slots[i])
			continue;

		targ_prog = map->init_slots[i];
		fd = bpf_program__fd(targ_prog);

		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
		if (err) {
			err = -errno;
			pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
				map->name, i, targ_prog->name, fd, err);
			return err;
		}
		pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
			 map->name, i, targ_prog->name, fd);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	return 0;
}
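
/* Illustrative sketch, not part of libbpf: the init_slots filled in above
 * come from declarative BPF-side initialization of map-in-map contents,
 * along the lines of the selftests pattern (names are assumptions):
 */
#if 0 /* BPF-side sketch */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_map1 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 3);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__array(values, struct inner_map);
} outer_map SEC(".maps") = {
	.values = { [0] = &inner_map1 },
};
#endif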

static int bpf_object_init_prog_arrays(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i, err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		err = init_prog_array_slots(obj, map);
		if (err < 0)
			return err;
	}
	return 0;
}

static int map_set_def_max_entries(struct bpf_map *map)
{
	if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
		int nr_cpus;

		nr_cpus = libbpf_num_possible_cpus();
		if (nr_cpus < 0) {
			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
				map->name, nr_cpus);
			return nr_cpus;
		}
		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
		map->def.max_entries = nr_cpus;
	}

	return 0;
}
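
/* Illustrative sketch, not part of libbpf: a PERF_EVENT_ARRAY declared
 * without max_entries gets auto-sized to the number of possible CPUs by
 * the helper above:
 */
#if 0 /* BPF-side sketch */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps"); /* max_entries intentionally omitted */
#endif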

static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	char *cp, errmsg[STRERR_BUFSIZE];
	unsigned int i, j;
	int err;
	bool retried;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		/* To support old kernels, we skip creating global data maps
		 * (.rodata, .data, .kconfig, etc); later on, during program
		 * loading, if we detect that at least one of the to-be-loaded
		 * programs is referencing any global data map, we'll error
		 * out with program name and relocation index logged.
		 * This approach allows us to accommodate Clang emitting
		 * unnecessary .rodata.str1.1 sections for string literals,
		 * but it also allows CO-RE applications that use global
		 * variables in some of their BPF programs, but not in others.
		 * If those global variable-using programs are not loaded at
		 * runtime due to bpf_program__set_autoload(prog, false),
		 * bpf_object loading will succeed just fine even on old
		 * kernels.
		 */
		if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
			map->autocreate = false;

		if (!map->autocreate) {
			pr_debug("map '%s': skipped auto-creating...\n", map->name);
			continue;
		}

		err = map_set_def_max_entries(map);
		if (err)
			goto err_out;

		retried = false;
retry:
		if (map->pin_path) {
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("map '%s': error reusing pinned map\n",
					map->name);
				goto err_out;
			}
			if (retried && map->fd < 0) {
				pr_warn("map '%s': cannot find pinned map\n",
					map->name);
				err = -ENOENT;
				goto err_out;
			}
		}

		if (map->reused) {
			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
				 map->name, map->fd);
		} else {
			err = bpf_object__create_map(obj, map, false);
			if (err)
				goto err_out;

			pr_debug("map '%s': created successfully, fd=%d\n",
				 map->name, map->fd);

			if (bpf_map__is_internal(map)) {
				err = bpf_object__populate_internal_map(obj, map);
				if (err < 0)
					goto err_out;
			}
			if (map->def.type == BPF_MAP_TYPE_ARENA) {
				map->mmaped = mmap((void *)(long)map->map_extra,
						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
						   map->fd, 0);
				if (map->mmaped == MAP_FAILED) {
					err = -errno;
					map->mmaped = NULL;
					pr_warn("map '%s': failed to mmap arena: %d\n",
						map->name, err);
					return err;
				}
				if (obj->arena_data) {
					memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
					zfree(&obj->arena_data);
				}
			}
			if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
				err = init_map_in_map_slots(obj, map);
				if (err < 0)
					goto err_out;
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				if (!retried && err == -EEXIST) {
					retried = true;
					goto retry;
				}
				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
					map->name, map->pin_path, err);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
	pr_perm_msg(err);
	for (j = 0; j < i; j++)
		zclose(obj->maps[j].fd);
	return err;
}

static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */
size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}
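
/* Illustrative sketch, not part of libbpf: "flavors" let one object file
 * carry alternative local definitions that all match the same kernel type;
 * everything after the last ___ is ignored during candidate matching:
 */
#if 0 /* BPF-side sketch */
struct task_struct___old {
	long state;	/* field layout as on older kernels */
} __attribute__((preserve_access_index));

/* bpf_core_essential_name_len("task_struct___old") == strlen("task_struct") */
#endif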

void bpf_core_free_cands(struct bpf_core_cand_list *cands)
{
	if (!cands)
		return;

	free(cands->cands);
	free(cands);
}

int bpf_core_add_cands(struct bpf_core_cand *local_cand,
		       size_t local_essent_len,
		       const struct btf *targ_btf,
		       const char *targ_btf_name,
		       int targ_start_id,
		       struct bpf_core_cand_list *cands)
{
	struct bpf_core_cand *new_cands, *cand;
	const struct btf_type *t, *local_t;
	const char *targ_name, *local_name;
	size_t targ_essent_len;
	int n, i;

	local_t = btf__type_by_id(local_cand->btf, local_cand->id);
	local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);

	n = btf__type_cnt(targ_btf);
	for (i = targ_start_id; i < n; i++) {
		t = btf__type_by_id(targ_btf, i);
		if (!btf_kind_core_compat(t, local_t))
			continue;

		targ_name = btf__name_by_offset(targ_btf, t->name_off);
		if (str_is_empty(targ_name))
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != local_essent_len)
			continue;

		if (strncmp(local_name, targ_name, local_essent_len) != 0)
			continue;

		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
			 local_cand->id, btf_kind_str(local_t),
			 local_name, i, btf_kind_str(t), targ_name,
			 targ_btf_name);
		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
						sizeof(*cands->cands));
		if (!new_cands)
			return -ENOMEM;

		cand = &new_cands[cands->len];
		cand->btf = targ_btf;
		cand->id = i;

		cands->cands = new_cands;
		cands->len++;
	}
	return 0;
}

static int load_module_btfs(struct bpf_object *obj)
{
	struct bpf_btf_info info;
	struct module_btf *mod_btf;
	struct btf *btf;
	char name[64];
	__u32 id = 0, len;
	int err, fd;

	if (obj->btf_modules_loaded)
		return 0;

	if (obj->gen_loader)
		return 0;

	/* don't do this again, even if we find no module BTFs */
	obj->btf_modules_loaded = true;

	/* kernel too old to support module BTFs */
	if (!kernel_supports(obj, FEAT_MODULE_BTF))
		return 0;

	while (true) {
		err = bpf_btf_get_next_id(id, &id);
		if (err && errno == ENOENT)
			return 0;
		if (err && errno == EPERM) {
			pr_debug("skipping module BTFs loading, missing privileges\n");
			return 0;
		}
		if (err) {
			err = -errno;
			pr_warn("failed to iterate BTF objects: %d\n", err);
			return err;
		}

		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue; /* expected race: BTF was unloaded */
			err = -errno;
			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
			return err;
		}

		len = sizeof(info);
		memset(&info, 0, sizeof(info));
		info.name = ptr_to_u64(name);
		info.name_len = sizeof(name);

		err = bpf_btf_get_info_by_fd(fd, &info, &len);
		if (err) {
			err = -errno;
			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
			goto err_out;
		}

		/* ignore non-module BTFs */
		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
			close(fd);
			continue;
		}

		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
		err = libbpf_get_error(btf);
		if (err) {
			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
				name, id, err);
			goto err_out;
		}

		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
		if (err)
			goto err_out;

		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];

		mod_btf->btf = btf;
		mod_btf->id = id;
		mod_btf->fd = fd;
		mod_btf->name = strdup(name);
		if (!mod_btf->name) {
			err = -ENOMEM;
			goto err_out;
		}
		continue;

err_out:
		close(fd);
		return err;
	}

	return 0;
}

static struct bpf_core_cand_list *
bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
{
	struct bpf_core_cand local_cand = {};
	struct bpf_core_cand_list *cands;
	const struct btf *main_btf;
	const struct btf_type *local_t;
	const char *local_name;
	size_t local_essent_len;
	int err, i;

	local_cand.btf = local_btf;
	local_cand.id = local_type_id;
	local_t = btf__type_by_id(local_btf, local_type_id);
	if (!local_t)
		return ERR_PTR(-EINVAL);

	local_name = btf__name_by_offset(local_btf, local_t->name_off);
	if (str_is_empty(local_name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return ERR_PTR(-ENOMEM);

	/* Attempt to find target candidates in vmlinux BTF first */
	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cands->len)
		return cands;

	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
	if (obj->btf_vmlinux_override)
		return cands;

	/* now look through module BTFs, trying to still find candidates */
	err = load_module_btfs(obj);
	if (err)
		goto err_out;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		err = bpf_core_add_cands(&local_cand, local_essent_len,
					 obj->btf_modules[i].btf,
					 obj->btf_modules[i].name,
					 btf__type_cnt(obj->btf_vmlinux),
					 cands);
		if (err)
			goto err_out;
	}

	return cands;
err_out:
	bpf_core_free_cands(cands);
	return ERR_PTR(err);
}

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signatures: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
}

int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
			 const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
}

static size_t bpf_core_hash_fn(const long key, void *ctx)
{
	return key;
}

static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
{
	return k1 == k2;
}

static int record_relo_core(struct bpf_program *prog,
			    const struct bpf_core_relo *core_relo, int insn_idx)
{
	struct reloc_desc *relos, *relo;

	relos = libbpf_reallocarray(prog->reloc_desc,
				    prog->nr_reloc + 1, sizeof(*relos));
	if (!relos)
		return -ENOMEM;
	relo = &relos[prog->nr_reloc];
	relo->type = RELO_CORE;
	relo->insn_idx = insn_idx;
	relo->core_relo = core_relo;
	prog->reloc_desc = relos;
	prog->nr_reloc++;
	return 0;
}

static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
{
	struct reloc_desc *relo;
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		relo = &prog->reloc_desc[i];
		if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
			continue;

		return relo->core_relo;
	}

	return NULL;
}

static int bpf_core_resolve_relo(struct bpf_program *prog,
				 const struct bpf_core_relo *relo,
				 int relo_idx,
				 const struct btf *local_btf,
				 struct hashmap *cand_cache,
				 struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec specs_scratch[3] = {};
	struct bpf_core_cand_list *cands = NULL;
	const char *prog_name = prog->name;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id = relo->type_id;
	int err;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type)
		return -EINVAL;

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
	    !hashmap__find(cand_cache, local_id, &cands)) {
		cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
		if (IS_ERR(cands)) {
			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
				prog_name, relo_idx, local_id, btf_kind_str(local_type),
				local_name, PTR_ERR(cands));
			return PTR_ERR(cands);
		}
		err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
		if (err) {
			bpf_core_free_cands(cands);
			return err;
		}
	}

	return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
				       targ_res);
}

static int
bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
	const struct btf_ext_info_sec *sec;
	struct bpf_core_relo_res targ_res;
	const struct bpf_core_relo *rec;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct bpf_program *prog;
	struct bpf_insn *insn;
	const char *sec_name;
	int i, err = 0, insn_idx, sec_idx, sec_num;

	if (obj->btf_ext->core_relo_info.len == 0)
		return 0;

	if (targ_btf_path) {
		obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
		err = libbpf_get_error(obj->btf_vmlinux_override);
		if (err) {
			pr_warn("failed to parse target BTF: %d\n", err);
			return err;
		}
	}

	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &obj->btf_ext->core_relo_info;
	sec_num = 0;
	for_each_btf_ext_sec(seg, sec) {
		sec_idx = seg->sec_idxs[sec_num];
		sec_num++;

		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
		if (str_is_empty(sec_name)) {
			err = -EINVAL;
			goto out;
		}

		pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);

		for_each_btf_ext_rec(seg, sec, i, rec) {
			if (rec->insn_off % BPF_INSN_SZ)
				return -EINVAL;
			insn_idx = rec->insn_off / BPF_INSN_SZ;
			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
			if (!prog) {
				/* When __weak subprog is "overridden" by another instance
				 * of the subprog from a different object file, linker still
				 * appends all the .BTF.ext info that used to belong to that
				 * eliminated subprogram.
				 * This is similar to what x86-64 linker does for relocations.
				 * So just ignore such relocations just like we ignore
				 * subprog instructions when discovering subprograms.
				 */
				pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
					 sec_name, i, insn_idx);
				continue;
			}
			/* no need to apply CO-RE relocation if the program is
			 * not going to be loaded
			 */
			if (!prog->autoload)
				continue;

			/* adjust insn_idx from section frame of reference to the local
			 * program's frame of reference; (sub-)program code is not yet
			 * relocated, so it's enough to just subtract in-section offset
			 */
			insn_idx = insn_idx - prog->sec_insn_off;
			if (insn_idx >= prog->insns_cnt)
				return -EINVAL;
			insn = &prog->insns[insn_idx];

			err = record_relo_core(prog, rec, insn_idx);
			if (err) {
				pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
					prog->name, i, err);
				goto out;
			}

			if (prog->obj->gen_loader)
				continue;

			err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
			if (err) {
				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
					prog->name, i, err);
				goto out;
			}

			err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
			if (err) {
				pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
					prog->name, i, insn_idx, err);
				goto out;
			}
		}
	}

out:
	/* obj->btf_vmlinux and module BTFs are freed after object load */
	btf__free(obj->btf_vmlinux_override);
	obj->btf_vmlinux_override = NULL;

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}
	return err;
}
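
/* Illustrative sketch, not part of libbpf: targ_btf_path above is plumbed
 * from the open option that lets CO-RE run against custom BTF (e.g., for
 * kernels built without CONFIG_DEBUG_INFO_BTF); the file path below is an
 * assumption:
 */
#if 0 /* usage sketch */
LIBBPF_OPTS(bpf_object_open_opts, opts,
	.btf_custom_path = "/srv/btf/5.4.0-custom.btf",
);
struct bpf_object *o = bpf_object__open_file("prog.bpf.o", &opts);
#endif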

/* base map load ldimm64 special constant, used also for log fixup logic */
#define POISON_LDIMM64_MAP_BASE 2001000000
#define POISON_LDIMM64_MAP_PFX "200100"

static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
			       int insn_idx, struct bpf_insn *insn,
			       int map_idx, const struct bpf_map *map)
{
	int i;

	pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
		 prog->name, relo_idx, insn_idx, map_idx, map->name);

	/* we turn single ldimm64 into two identical invalid calls */
	for (i = 0; i < 2; i++) {
		insn->code = BPF_JMP | BPF_CALL;
		insn->dst_reg = 0;
		insn->src_reg = 0;
		insn->off = 0;
		/* if this instruction is reachable (not dead code),
		 * verifier will complain with something like:
		 * invalid func unknown#2001000123
		 * where lower 123 is map index into obj->maps[] array
		 */
		insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;

		insn++;
	}
}

/* unresolved kfunc call special constant, used also for log fixup logic */
#define POISON_CALL_KFUNC_BASE 2002000000
#define POISON_CALL_KFUNC_PFX "2002"

static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
			      int insn_idx, struct bpf_insn *insn,
			      int ext_idx, const struct extern_desc *ext)
{
	pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
		 prog->name, relo_idx, insn_idx, ext->name);

	/* we turn kfunc call into invalid helper call with identifiable constant */
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with something like:
	 * invalid func unknown#2002000123
	 * where lower 123 is extern index into obj->externs[] array
	 */
	insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
}
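
/* Illustrative sketch, not part of libbpf: poisoning lets BPF code declare
 * kfuncs as weak and guard the call site, so objects still load on kernels
 * that lack the kfunc; bpf_task_release is just an example kfunc here:
 */
#if 0 /* BPF-side sketch */
extern void bpf_task_release(struct task_struct *p) __ksym __weak;

if (bpf_ksym_exists(bpf_task_release))
	bpf_task_release(task);
/* an unguarded, reachable call would trip the poisoned instruction above */
#endif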

/* Relocate data references within program code:
 *  - map references;
 *  - global variable references;
 *  - extern references.
 */
static int
bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];
		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
		const struct bpf_map *map;
		struct extern_desc *ext;

		switch (relo->type) {
		case RELO_LD64:
			map = &obj->maps[relo->map_idx];
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
				insn[0].imm = relo->map_idx;
			} else if (map->autocreate) {
				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
				insn[0].imm = map->fd;
			} else {
				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
						   relo->map_idx, map);
			}
			break;
		case RELO_DATA:
			map = &obj->maps[relo->map_idx];
			insn[1].imm = insn[0].imm + relo->sym_off;
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
				insn[0].imm = relo->map_idx;
			} else if (map->autocreate) {
				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
				insn[0].imm = map->fd;
			} else {
				poison_map_ldimm64(prog, i, relo->insn_idx, insn,
						   relo->map_idx, map);
			}
			break;
		case RELO_EXTERN_LD64:
			ext = &obj->externs[relo->ext_idx];
			if (ext->type == EXT_KCFG) {
				if (obj->gen_loader) {
					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
					insn[0].imm = obj->kconfig_map_idx;
				} else {
					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
				}
				insn[1].imm = ext->kcfg.data_off;
			} else /* EXT_KSYM */ {
				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
					insn[0].imm = ext->ksym.kernel_btf_id;
					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
				} else { /* typeless ksyms or unresolved typed ksyms */
					insn[0].imm = (__u32)ext->ksym.addr;
					insn[1].imm = ext->ksym.addr >> 32;
				}
			}
			break;
		case RELO_EXTERN_CALL:
			ext = &obj->externs[relo->ext_idx];
			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
			if (ext->is_set) {
				insn[0].imm = ext->ksym.kernel_btf_id;
				insn[0].off = ext->ksym.btf_fd_idx;
			} else { /* unresolved weak kfunc call */
				poison_kfunc_call(prog, i, relo->insn_idx, insn,
						  relo->ext_idx, ext);
			}
			break;
		case RELO_SUBPROG_ADDR:
			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
				pr_warn("prog '%s': relo #%d: bad insn\n",
					prog->name, i);
				return -EINVAL;
			}
			/* handled already */
			break;
		case RELO_CALL:
			/* handled already */
			break;
		case RELO_CORE:
			/* will be handled by bpf_program_record_relos() */
			break;
		default:
			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
				prog->name, i, relo->type);
			return -EINVAL;
		}
	}

	return 0;
}
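
/* Illustrative sketch, not part of libbpf: the RELO_EXTERN_LD64 handling
 * above serves BPF-side extern declarations like these:
 */
#if 0 /* BPF-side sketch */
extern int LINUX_KERNEL_VERSION __kconfig;	/* resolved via .kconfig map */
extern const int bpf_prog_active __ksym;	/* resolved via kernel BTF */
#endif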

static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
				    const struct bpf_program *prog,
				    const struct btf_ext_info *ext_info,
				    void **prog_info, __u32 *prog_rec_cnt,
				    __u32 *prog_rec_sz)
{
	void *copy_start = NULL, *copy_end = NULL;
	void *rec, *rec_end, *new_prog_info;
	const struct btf_ext_info_sec *sec;
	size_t old_sz, new_sz;
	int i, sec_num, sec_idx, off_adj;

	sec_num = 0;
	for_each_btf_ext_sec(ext_info, sec) {
		sec_idx = ext_info->sec_idxs[sec_num];
		sec_num++;
		if (prog->sec_idx != sec_idx)
			continue;

		for_each_btf_ext_rec(ext_info, sec, i, rec) {
			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;

			if (insn_off < prog->sec_insn_off)
				continue;
			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
				break;

			if (!copy_start)
				copy_start = rec;
			copy_end = rec + ext_info->rec_size;
		}

		if (!copy_start)
			return -ENOENT;

		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
		new_sz = old_sz + (copy_end - copy_start);
		new_prog_info = realloc(*prog_info, new_sz);
		if (!new_prog_info)
			return -ENOMEM;
		*prog_info = new_prog_info;
		*prog_rec_cnt = new_sz / ext_info->rec_size;
		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);

		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes. So convert Clang offsets
		 * into kernel offsets and adjust offset according to program
		 * relocated position.
		 */
		off_adj = prog->sub_insn_off - prog->sec_insn_off;
		rec = new_prog_info + old_sz;
		rec_end = new_prog_info + new_sz;
		for (; rec < rec_end; rec += ext_info->rec_size) {
			__u32 *insn_off = rec;

			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
		}
		*prog_rec_sz = ext_info->rec_size;
		return 0;
	}

	return -ENOENT;
}

static int
reloc_prog_func_and_line_info(const struct bpf_object *obj,
			      struct bpf_program *main_prog,
			      const struct bpf_program *prog)
{
	int err;

	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
		return 0;

	/* only attempt func info relocation if main program's func_info
	 * relocation was successful
	 */
	if (main_prog != prog && !main_prog->func_info)
		goto line_info;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
				       &main_prog->func_info,
				       &main_prog->func_info_cnt,
				       &main_prog->func_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->func_info) {
			/*
			 * Some info has already been found, but there is
			 * a problem with the last btf_ext reloc. We must
			 * error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
			return err;
		}
		/* We have a problem loading the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
			prog->name);
	}

line_info:
	/* don't relocate line info if main program's relocation failed */
	if (main_prog != prog && !main_prog->line_info)
		return 0;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
				       &main_prog->line_info,
				       &main_prog->line_info_cnt,
				       &main_prog->line_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->line_info) {
			/*
			 * Some info has already been found, but there is
			 * a problem with the last btf_ext reloc. We must
			 * error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
			return err;
		}
		/* We have a problem loading the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
			prog->name);
	}
	return 0;
}

static int cmp_relo_by_insn_idx(const void *key, const void *elem)
{
	size_t insn_idx = *(const size_t *)key;
	const struct reloc_desc *relo = elem;

	if (insn_idx == relo->insn_idx)
		return 0;
	return insn_idx < relo->insn_idx ? -1 : 1;
}

static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
{
	if (!prog->nr_reloc)
		return NULL;
	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}

static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
{
	int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
	struct reloc_desc *relos;
	int i;

	if (main_prog == subprog)
		return 0;
	relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
	/* if new count is zero, reallocarray can return a valid NULL result;
	 * in this case the previous pointer will be freed, so we *have to*
	 * reassign old pointer to the new value (even if it's NULL)
	 */
	if (!relos && new_cnt)
		return -ENOMEM;
	if (subprog->nr_reloc)
		memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
		       sizeof(*relos) * subprog->nr_reloc);

	for (i = main_prog->nr_reloc; i < new_cnt; i++)
		relos[i].insn_idx += subprog->sub_insn_off;
	/* After insn_idx adjustment the 'relos' array is still sorted
	 * by insn_idx and doesn't break bsearch.
	 */
	main_prog->reloc_desc = relos;
	main_prog->nr_reloc = new_cnt;
	return 0;
}

static int
bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
				struct bpf_program *subprog)
{
	struct bpf_insn *insns;
	size_t new_cnt;
	int err;

	subprog->sub_insn_off = main_prog->insns_cnt;

	new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
	insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
	if (!insns) {
		pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
		return -ENOMEM;
	}
	main_prog->insns = insns;
	main_prog->insns_cnt = new_cnt;

	memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
	       subprog->insns_cnt * sizeof(*insns));

	pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
		 main_prog->name, subprog->insns_cnt, subprog->name);

	/* The subprog insns are now appended. Append its relos too. */
	err = append_subprog_relos(main_prog, subprog);
	if (err)
		return err;
	return 0;
}

static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
		       struct bpf_program *prog)
{
	size_t sub_insn_idx, insn_idx;
	struct bpf_program *subprog;
	struct reloc_desc *relo;
	struct bpf_insn *insn;
	int err;

	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
	if (err)
		return err;

	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
			continue;

		relo = find_prog_insn_relo(prog, insn_idx);
		if (relo && relo->type == RELO_EXTERN_CALL)
			/* kfunc relocations will be handled later
			 * in bpf_object__relocate_data()
			 */
			continue;
		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
				prog->name, insn_idx, relo->type);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (relo) {
			/* sub-program instruction index is a combination of
			 * an offset of a symbol pointed to by relocation and
			 * call instruction's imm field; for global functions,
			 * call always has imm = -1, but for static functions
			 * relocation is against STT_SECTION and insn->imm
			 * points to a start of a static function
			 *
			 * for subprog addr relocation, the relo->sym_off + insn->imm is
			 * the byte offset in the corresponding section.
			 */
			if (relo->type == RELO_CALL)
				sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
			else
				sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
		} else if (insn_is_pseudo_func(insn)) {
			/*
			 * RELO_SUBPROG_ADDR relo is always emitted even if both
			 * functions are in the same section, so it shouldn't reach here.
			 */
			pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
				prog->name, insn_idx);
			return -LIBBPF_ERRNO__RELOC;
		} else {
			/* if subprogram call is to a static function within
			 * the same ELF section, there won't be any relocation
			 * emitted, but it also means there is no additional
			 * offset necessary, insns->imm is relative to
			 * instruction's original position within the section
			 */
			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
		}

		/* we enforce that sub-programs should be in .text section */
		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
		if (!subprog) {
			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
				prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instructions block;
		 *   - process it recursively, while current program is put on hold;
		 *   - if that subprogram calls some other not-yet-processed
		 *   subprogram, the same thing will happen recursively until
		 *   there are no more unprocessed subprograms left to append
		 *   and relocate.
		 */
		if (subprog->sub_insn_off == 0) {
			err = bpf_object__append_subprog_code(obj, main_prog, subprog);
			if (err)
				return err;
			err = bpf_object__reloc_code(obj, main_prog, subprog);
			if (err)
				return err;
		}

		/* main_prog->insns memory could have been re-allocated, so
		 * calculate pointer again
		 */
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		/* calculate correct instruction position within current main
		 * prog; each main prog can have a different set of
		 * subprograms appended (potentially in different order as
		 * well), so position of any subprog can be different for
		 * different main programs
		 */
		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;

		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
	}

	return 0;
}

/*
 * Relocate sub-program calls.
 *
 * Algorithm operates as follows. Each entry-point BPF program (referred to as
 * main prog) is processed separately. Each subprog (a non-entry function,
 * which can be called from either entry progs or other subprogs) gets its
 * sub_insn_off reset to zero. This serves as an indicator that this
 * subprogram hasn't been appended and relocated within the current main prog
 * yet. Once it's relocated, sub_insn_off will point at the position within
 * the current main prog where the given subprog was appended. This will
 * further be used to relocate all the call instructions jumping into this
 * subprog.
 *
 * We start with main program and process all call instructions. If the call
 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
 * is zero), subprog instructions are appended at the end of main program's
 * instruction array. Then main program is "put on hold" while we recursively
 * process newly appended subprogram. If that subprogram calls into another
 * subprogram that hasn't been appended, new subprogram is appended again to
 * the *main* prog's instructions (subprog's instructions are always left
 * untouched, as they need to be in unmodified state for subsequent main progs
 * and subprog instructions are always sent only as part of a main prog) and
 * the process continues recursively. Once all the subprogs called from a main
 * prog or any of its subprogs are appended (and relocated), all their
 * positions within finalized instructions array are known, so it's easy to
 * rewrite call instructions with correct relative offsets, corresponding to
 * desired target subprog.
 *
 * It's important to realize that some subprogs might not be called from some
 * main prog and any of its called/used subprogs. Those will keep their
 * subprog->sub_insn_off as zero at all times and won't be appended to current
 * main prog and won't be relocated within the context of current main prog.
 * They might still be used from other main progs later.
 *
 * Visually this process can be shown as below. Suppose we have two main
 * programs mainA and mainB and BPF object contains three subprogs: subA,
 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
 * subC both call subB:
 *
 *        +--------+ +-------+
 *        |        v v       |
 *     +--+---+ +--+-+-+ +---+--+
 *     | subA | | subB | | subC |
 *     +--+---+ +------+ +---+--+
 *        ^                  ^
 *        |                  |
 *    +---+-------+   +------+----+
 *    |   mainA   |   |   mainB   |
 *    +-----------+   +-----------+
 *
 * We'll start relocating mainA, will find subA, append it and start
 * processing subA recursively:
 *
 *    +-----------+------+
 *    |   mainA   | subA |
 *    +-----------+------+
 *
 * At this point we notice that subB is used from subA, so we append it and
 * relocate (there are no further subcalls from subB):
 *
 *    +-----------+------+------+
 *    |   mainA   | subA | subB |
 *    +-----------+------+------+
 *
 * At this point, we relocate subA calls, then go one level up and finish with
 * relocating mainA calls. mainA is done.
 *
 * For mainB process is similar but results in different order. We start with
 * mainB and skip subA and subB, as mainB never calls them (at least
 * directly), but we see subC is needed, so we append and start processing it:
 *
 *    +-----------+------+
 *    |   mainB   | subC |
 *    +-----------+------+
 * Now we see subC needs subB, so we go back to it, append and relocate it:
 *
 *    +-----------+------+------+
 *    |   mainB   | subC | subB |
 *    +-----------+------+------+
 *
 * At this point we unwind recursion, relocate calls in subC, then in mainB.
 */
static int
bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
{
	struct bpf_program *subprog;
	int i, err;

	/* mark all subprogs as not relocated (yet) within the context of
	 * current main program
	 */
	for (i = 0; i < obj->nr_programs; i++) {
		subprog = &obj->programs[i];
		if (!prog_is_subprog(obj, subprog))
			continue;

		subprog->sub_insn_off = 0;
	}

	err = bpf_object__reloc_code(obj, prog, prog);
	if (err)
		return err;

	return 0;
}
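
/* Illustrative sketch, not part of libbpf: the append-and-relocate scheme
 * above handles layouts like this, where two entry progs share a helper
 * that lives in .text (all names below are assumptions):
 */
#if 0 /* BPF-side sketch */
static __noinline int shared_helper(int x)	/* subprog in .text */
{
	return x * 2;
}

SEC("tc") int mainA(struct __sk_buff *skb) { return shared_helper(1); }
SEC("tc") int mainB(struct __sk_buff *skb) { return shared_helper(2); }
#endif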

static void
bpf_object__free_relocs(struct bpf_object *obj)
{
	struct bpf_program *prog;
	int i;

	/* free up relocation descriptors */
	for (i = 0; i < obj->nr_programs; i++) {
		prog = &obj->programs[i];
		zfree(&prog->reloc_desc);
		prog->nr_reloc = 0;
	}
}

static int cmp_relocs(const void *_a, const void *_b)
{
	const struct reloc_desc *a = _a;
	const struct reloc_desc *b = _b;

	if (a->insn_idx != b->insn_idx)
		return a->insn_idx < b->insn_idx ? -1 : 1;

	/* no two relocations should have the same insn_idx, but ... */
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;

	return 0;
}

static void bpf_object__sort_relos(struct bpf_object *obj)
{
	int i;

	for (i = 0; i < obj->nr_programs; i++) {
		struct bpf_program *p = &obj->programs[i];

		if (!p->nr_reloc)
			continue;

		qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
	}
}

static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
{
	const char *str = "exception_callback:";
	size_t pfx_len = strlen(str);
	int i, j, n;

	if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
		return 0;

	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		const char *name;
		struct btf_type *t;

		t = btf_type_by_id(obj->btf, i);
		if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
			continue;

		name = btf__str_by_offset(obj->btf, t->name_off);
		if (strncmp(name, str, pfx_len) != 0)
			continue;

		t = btf_type_by_id(obj->btf, t->type);
		if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
			pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
				prog->name);
			return -EINVAL;
		}
		if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
			continue;
		/* Multiple callbacks are specified for the same prog,
		 * the verifier will eventually return an error for this
		 * case, hence simply skip appending a subprog.
		 */
		if (prog->exception_cb_idx >= 0) {
			prog->exception_cb_idx = -1;
			break;
		}

		name += pfx_len;
		if (str_is_empty(name)) {
			pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
				prog->name);
			return -EINVAL;
		}

		for (j = 0; j < obj->nr_programs; j++) {
			struct bpf_program *subprog = &obj->programs[j];

			if (!prog_is_subprog(obj, subprog))
				continue;
			if (strcmp(name, subprog->name) != 0)
				continue;
			/* Enforce non-hidden, as from verifier point of
			 * view it expects global functions, whereas the
			 * mark_btf_static fixes up linkage as static.
			 */
			if (!subprog->sym_global || subprog->mark_btf_static) {
				pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
					prog->name, subprog->name);
				return -EINVAL;
			}
			/* Let's see if we already saw a static exception callback with the same name */
			if (prog->exception_cb_idx >= 0) {
				pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
					prog->name, subprog->name);
				return -EINVAL;
			}
			prog->exception_cb_idx = j;
			break;
		}

		if (prog->exception_cb_idx >= 0)
			continue;

		pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
		return -ENOENT;
	}

	return 0;
}
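
/* Illustrative sketch, not part of libbpf: the decl tag scanned above is
 * normally attached via the __exception_cb() convenience macro from the
 * selftests' bpf_experimental.h, which expands to
 * __attribute__((btf_decl_tag("exception_callback:<name>"))); names below
 * are assumptions:
 */
#if 0 /* BPF-side sketch */
int my_exc_handler(u64 cookie);	/* global subprog used as exception cb */

SEC("tc") __exception_cb(my_exc_handler)
int main_prog(struct __sk_buff *skb)
{
	return 0;
}
#endif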

static struct {
	enum bpf_prog_type prog_type;
	const char *ctx_name;
} global_ctx_map[] = {
	{ BPF_PROG_TYPE_CGROUP_DEVICE,           "bpf_cgroup_dev_ctx" },
	{ BPF_PROG_TYPE_CGROUP_SKB,              "__sk_buff" },
	{ BPF_PROG_TYPE_CGROUP_SOCK,             "bpf_sock" },
	{ BPF_PROG_TYPE_CGROUP_SOCK_ADDR,        "bpf_sock_addr" },
	{ BPF_PROG_TYPE_CGROUP_SOCKOPT,          "bpf_sockopt" },
	{ BPF_PROG_TYPE_CGROUP_SYSCTL,           "bpf_sysctl" },
	{ BPF_PROG_TYPE_FLOW_DISSECTOR,          "__sk_buff" },
	{ BPF_PROG_TYPE_KPROBE,                  "bpf_user_pt_regs_t" },
	{ BPF_PROG_TYPE_LWT_IN,                  "__sk_buff" },
	{ BPF_PROG_TYPE_LWT_OUT,                 "__sk_buff" },
	{ BPF_PROG_TYPE_LWT_SEG6LOCAL,           "__sk_buff" },
	{ BPF_PROG_TYPE_LWT_XMIT,                "__sk_buff" },
	{ BPF_PROG_TYPE_NETFILTER,               "bpf_nf_ctx" },
	{ BPF_PROG_TYPE_PERF_EVENT,              "bpf_perf_event_data" },
	{ BPF_PROG_TYPE_RAW_TRACEPOINT,          "bpf_raw_tracepoint_args" },
	{ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
	{ BPF_PROG_TYPE_SCHED_ACT,               "__sk_buff" },
	{ BPF_PROG_TYPE_SCHED_CLS,               "__sk_buff" },
	{ BPF_PROG_TYPE_SK_LOOKUP,               "bpf_sk_lookup" },
	{ BPF_PROG_TYPE_SK_MSG,                  "sk_msg_md" },
	{ BPF_PROG_TYPE_SK_REUSEPORT,            "sk_reuseport_md" },
	{ BPF_PROG_TYPE_SK_SKB,                  "__sk_buff" },
	{ BPF_PROG_TYPE_SOCK_OPS,                "bpf_sock_ops" },
	{ BPF_PROG_TYPE_SOCKET_FILTER,           "__sk_buff" },
	{ BPF_PROG_TYPE_XDP,                     "xdp_md" },
	/* all other program types don't have "named" context structs */
};
6676
9eea8faf
AN
6677/* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6678 * for below __builtin_types_compatible_p() checks;
6679 * with this approach we don't need any extra arch-specific #ifdef guards
6680 */
6681struct pt_regs;
6682struct user_pt_regs;
6683struct user_regs_struct;
6684
76ec90a9
AN
6685static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6686 const char *subprog_name, int arg_idx,
6687 int arg_type_id, const char *ctx_name)
6688{
6689 const struct btf_type *t;
6690 const char *tname;
6691
6692 /* check if existing parameter already matches verifier expectations */
6693 t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6694 if (!btf_is_ptr(t))
6695 goto out_warn;
6696
6697 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6698 * and perf_event programs, so check this case early on and forget
6699 * about it for subsequent checks
6700 */
6701 while (btf_is_mod(t))
6702 t = btf__type_by_id(btf, t->type);
6703 if (btf_is_typedef(t) &&
6704 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6705 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6706 if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6707 return false; /* canonical type for kprobe/perf_event */
6708 }
6709
6710 /* now we can ignore typedefs moving forward */
6711 t = skip_mods_and_typedefs(btf, t->type, NULL);
6712
6713 /* if it's `void *`, definitely fix up BTF info */
6714 if (btf_is_void(t))
6715 return true;
6716
6717 /* if it's already proper canonical type, no need to fix up */
6718 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6719 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6720 return false;
6721
6722 /* special cases */
6723 switch (prog->type) {
6724 case BPF_PROG_TYPE_KPROBE:
76ec90a9
AN
6725 /* `struct pt_regs *` is expected, but we need to fix up */
6726 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6727 return true;
6728 break;
9eea8faf
AN
6729 case BPF_PROG_TYPE_PERF_EVENT:
6730 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6731 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
d7bc416a 6732 return true;
9eea8faf
AN
6733 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6734 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
d7bc416a 6735 return true;
9eea8faf
AN
6736 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6737 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
d7bc416a 6738 return true;
9eea8faf 6739 break;
76ec90a9
AN
6740 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6741 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6742 /* allow u64* as ctx */
6743 if (btf_is_int(t) && t->size == 8)
6744 return true;
6745 break;
6746 default:
6747 break;
6748 }
6749
6750out_warn:
6751 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6752 prog->name, subprog_name, arg_idx, ctx_name);
6753 return false;
6754}
6755
2f38fe68
AN
6756static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6757{
6758 int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6759 int i, err, arg_cnt, fn_name_off, linkage;
6760 struct btf_type *fn_t, *fn_proto_t, *t;
6761 struct btf_param *p;
6762
6763	/* caller already validated the FUNC -> FUNC_PROTO chain */
6764 fn_t = btf_type_by_id(btf, orig_fn_id);
6765 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6766
6767 /* Note that each btf__add_xxx() operation invalidates
6768 * all btf_type and string pointers, so we need to be
6769 * very careful when cloning BTF types. BTF type
6770 * pointers have to be always refetched. And to avoid
6771 * problems with invalidated string pointers, we
6772 * add empty strings initially, then just fix up
6773 * name_off offsets in place. Offsets are stable for
6774 * existing strings, so that works out.
6775 */
6776 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6777 linkage = btf_func_linkage(fn_t);
6778 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6779 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6780 arg_cnt = btf_vlen(fn_proto_t);
6781
6782 /* clone FUNC_PROTO and its params */
6783 fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6784 if (fn_proto_id < 0)
6785 return -EINVAL;
6786
6787 for (i = 0; i < arg_cnt; i++) {
6788 int name_off;
6789
6790 /* copy original parameter data */
6791 t = btf_type_by_id(btf, orig_proto_id);
6792 p = &btf_params(t)[i];
6793 name_off = p->name_off;
6794
6795 err = btf__add_func_param(btf, "", p->type);
6796 if (err)
6797 return err;
6798
6799 fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6800 p = &btf_params(fn_proto_t)[i];
6801 p->name_off = name_off; /* use remembered str offset */
6802 }
6803
6804 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6805 * entry program's name as a placeholder, which we replace immediately
6806 * with original name_off
6807 */
6808 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6809 if (fn_id < 0)
6810 return -EINVAL;
6811
6812 fn_t = btf_type_by_id(btf, fn_id);
6813 fn_t->name_off = fn_name_off; /* reuse original string */
6814
6815 return fn_id;
6816}
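/* A small illustrative sketch (not part of libbpf) of the refetch
 * discipline the comment above describes: remember plain values before any
 * btf__add_*() call, then refetch type pointers, since additions may
 * realloc the underlying BTF buffers.
 */
static void example_refetch(struct btf *btf, int some_id)
{
	struct btf_type *t = btf_type_by_id(btf, some_id);
	int name_off = t->name_off;       /* plain values survive reallocs */

	btf__add_ptr(btf, t->type);       /* any btf__add_*() may realloc... */
	t = btf_type_by_id(btf, some_id); /* ...so refetch 't' before reuse */
	t->name_off = name_off;           /* offsets of existing strings are stable */
}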
6817
6818/* Check if main program or global subprog's function prototype has `arg:ctx`
6819 * argument tags, and, if necessary, substitute correct type to match what BPF
6820 * verifier would expect, taking into account specific program type. This
6821 * allows supporting the __arg_ctx tag transparently on old kernels that don't
6822 * yet have native support for it in the verifier, making users' lives much
6823 * easier.
6824 */
6825static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6826{
76ec90a9 6827 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
2f38fe68
AN
6828 struct bpf_func_info_min *func_rec;
6829 struct btf_type *fn_t, *fn_proto_t;
6830 struct btf *btf = obj->btf;
6831 const struct btf_type *t;
6832 struct btf_param *p;
6833 int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6834 int i, n, arg_idx, arg_cnt, err, rec_idx;
6835 int *orig_ids;
6836
6837 /* no .BTF.ext, no problem */
6838 if (!obj->btf_ext || !prog->func_info)
6839 return 0;
6840
01b55f4f 6841 /* don't do any fix ups if kernel natively supports __arg_ctx */
0e6d0a9d 6842 if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
01b55f4f
AN
6843 return 0;
6844
2f38fe68
AN
6845 /* some BPF program types just don't have named context structs, so
6846 * this fallback mechanism doesn't work for them
6847 */
6848 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6849 if (global_ctx_map[i].prog_type != prog->type)
6850 continue;
6851 ctx_name = global_ctx_map[i].ctx_name;
6852 break;
6853 }
6854 if (!ctx_name)
6855 return 0;
6856
6857 /* remember original func BTF IDs to detect if we already cloned them */
6858 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6859 if (!orig_ids)
6860 return -ENOMEM;
6861 for (i = 0; i < prog->func_info_cnt; i++) {
6862 func_rec = prog->func_info + prog->func_info_rec_size * i;
6863 orig_ids[i] = func_rec->type_id;
6864 }
6865
6866 /* go through each DECL_TAG with "arg:ctx" and see if it points to one
6867 * of our subprogs; if yes and subprog is global and needs adjustment,
6868 * clone and adjust FUNC -> FUNC_PROTO combo
6869 */
6870 for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6871 /* only DECL_TAG with "arg:ctx" value are interesting */
6872 t = btf__type_by_id(btf, i);
6873 if (!btf_is_decl_tag(t))
6874 continue;
6875 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6876 continue;
6877
6878 /* only global funcs need adjustment, if at all */
6879 orig_fn_id = t->type;
6880 fn_t = btf_type_by_id(btf, orig_fn_id);
6881 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6882 continue;
6883
6884 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
6885 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6886 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6887 continue;
6888
6889 /* find corresponding func_info record */
6890 func_rec = NULL;
6891 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6892 if (orig_ids[rec_idx] == t->type) {
6893 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6894 break;
6895 }
6896 }
6897 /* current main program doesn't call into this subprog */
6898 if (!func_rec)
6899 continue;
6900
6901 /* some more sanity checking of DECL_TAG */
6902 arg_cnt = btf_vlen(fn_proto_t);
6903 arg_idx = btf_decl_tag(t)->component_idx;
6904 if (arg_idx < 0 || arg_idx >= arg_cnt)
6905 continue;
6906
76ec90a9 6907 /* check if we should fix up argument type */
2f38fe68 6908 p = &btf_params(fn_proto_t)[arg_idx];
76ec90a9
AN
6909 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6910 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6911 continue;
2f38fe68
AN
6912
6913 /* clone fn/fn_proto, unless we already did it for another arg */
6914 if (func_rec->type_id == orig_fn_id) {
6915 int fn_id;
6916
6917 fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6918 if (fn_id < 0) {
6919 err = fn_id;
6920 goto err_out;
6921 }
6922
6923 /* point func_info record to a cloned FUNC type */
6924 func_rec->type_id = fn_id;
6925 }
6926
6927 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6928 * we do it just once per main BPF program, as all global
6929		 * funcs share the same program type, so we need only one
6930		 * PTR -> STRUCT type chain
6931 */
6932 if (ptr_id == 0) {
6933 struct_id = btf__add_struct(btf, ctx_name, 0);
6934 ptr_id = btf__add_ptr(btf, struct_id);
6935 if (ptr_id < 0 || struct_id < 0) {
6936 err = -EINVAL;
6937 goto err_out;
6938 }
6939 }
6940
6941 /* for completeness, clone DECL_TAG and point it to cloned param */
6942 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6943 if (tag_id < 0) {
6944 err = -EINVAL;
6945 goto err_out;
6946 }
6947
6948 /* all the BTF manipulations invalidated pointers, refetch them */
6949 fn_t = btf_type_by_id(btf, func_rec->type_id);
6950 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6951
6952 /* fix up type ID pointed to by param */
6953 p = &btf_params(fn_proto_t)[arg_idx];
6954 p->type = ptr_id;
6955 }
6956
6957 free(orig_ids);
6958 return 0;
6959err_out:
6960 free(orig_ids);
6961 return err;
6962}
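/* A minimal sketch of the user-facing feature this pass emulates on old
 * kernels. __arg_ctx normally comes from bpf_helpers.h and expands to the
 * "arg:ctx" decl tag scanned for above (shown inline for illustration);
 * function names are illustrative.
 */
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))

/* global subprog: libbpf rewrites its BTF so the verifier sees the
 * canonical `struct __sk_buff *` context type for SCHED_CLS progs
 */
__noinline int handle_skb(struct __sk_buff *skb __arg_ctx)
{
	return skb->len;
}

SEC("tc")
int tc_main(struct __sk_buff *skb)
{
	return handle_skb(skb);
}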
6963
6964static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
8a47a6c5
WN
6965{
6966 struct bpf_program *prog;
b1268826 6967 size_t i, j;
8a47a6c5
WN
6968 int err;
6969
ddc7c304
AN
6970 if (obj->btf_ext) {
6971 err = bpf_object__relocate_core(obj, targ_btf_path);
6972 if (err) {
be18010e
KW
6973 pr_warn("failed to perform CO-RE relocations: %d\n",
6974 err);
ddc7c304
AN
6975 return err;
6976 }
185cfe83 6977 bpf_object__sort_relos(obj);
ddc7c304 6978 }
b1268826
AS
6979
6980	/* Before relocating calls, pre-process relocations and mark
6981	 * the few ld_imm64 instructions that point to subprogs.
6982	 * Otherwise bpf_object__reloc_code() later would have to consider
6983	 * all ld_imm64 insns as relocation candidates. That would
6984	 * slow down relocation, since the number of find_prog_insn_relo()
6985	 * calls would increase and most of them would fail to find a relo.
9173cac3
AN
6986 */
6987 for (i = 0; i < obj->nr_programs; i++) {
6988 prog = &obj->programs[i];
b1268826
AS
6989 for (j = 0; j < prog->nr_reloc; j++) {
6990 struct reloc_desc *relo = &prog->reloc_desc[j];
6991 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6992
6993 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6994 if (relo->type == RELO_SUBPROG_ADDR)
6995 insn[0].src_reg = BPF_PSEUDO_FUNC;
9173cac3 6996 }
9173cac3 6997 }
b1268826
AS
6998
6999 /* relocate subprogram calls and append used subprograms to main
c3c55696
AN
7000 * programs; each copy of subprogram code needs to be relocated
7001 * differently for each main program, because its code location might
b1268826
AS
7002 * have changed.
7003 * Append subprog relos to main programs to allow data relos to be
7004 * processed after text is completely relocated.
9173cac3 7005 */
8a47a6c5
WN
7006 for (i = 0; i < obj->nr_programs; i++) {
7007 prog = &obj->programs[i];
c3c55696
AN
7008 /* sub-program's sub-calls are relocated within the context of
7009 * its main program only
7010 */
7011 if (prog_is_subprog(obj, prog))
9173cac3 7012 continue;
a3820c48 7013 if (!prog->autoload)
16e0c35c 7014 continue;
8a47a6c5 7015
c3c55696 7016 err = bpf_object__relocate_calls(obj, prog);
8a47a6c5 7017 if (err) {
9c0f8cbd
AN
7018 pr_warn("prog '%s': failed to relocate calls: %d\n",
7019 prog->name, err);
8a47a6c5
WN
7020 return err;
7021 }
7e2925f6 7022
fb03be7c
AN
7023 err = bpf_prog_assign_exc_cb(obj, prog);
7024 if (err)
7025 return err;
7e2925f6
KKD
7026 /* Now, also append exception callback if it has not been done already. */
7027 if (prog->exception_cb_idx >= 0) {
7028 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7029
7030			/* Calling the exception callback directly is disallowed;
7031			 * the verifier will reject it later. In case it was processed already,
7032 * we can skip this step, otherwise for all other valid cases we
7033 * have to append exception callback now.
7034 */
7035 if (subprog->sub_insn_off == 0) {
7036 err = bpf_object__append_subprog_code(obj, prog, subprog);
7037 if (err)
7038 return err;
7039 err = bpf_object__reloc_code(obj, prog, subprog);
7040 if (err)
7041 return err;
7042 }
7043 }
8a47a6c5 7044 }
b1268826
AS
7045 for (i = 0; i < obj->nr_programs; i++) {
7046 prog = &obj->programs[i];
7047 if (prog_is_subprog(obj, prog))
7048 continue;
a3820c48 7049 if (!prog->autoload)
16e0c35c 7050 continue;
2f38fe68
AN
7051
7052 /* Process data relos for main programs */
b1268826
AS
7053 err = bpf_object__relocate_data(obj, prog);
7054 if (err) {
7055 pr_warn("prog '%s': failed to relocate data references: %d\n",
7056 prog->name, err);
7057 return err;
7058 }
2f38fe68
AN
7059
7060 /* Fix up .BTF.ext information, if necessary */
7061 err = bpf_program_fixup_func_info(obj, prog);
7062 if (err) {
7063 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
7064 prog->name, err);
7065 return err;
7066 }
b1268826 7067 }
185cfe83 7068
8a47a6c5
WN
7069 return 0;
7070}
7071
646f02ff 7072static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
ad23b723 7073 Elf64_Shdr *shdr, Elf_Data *data);
646f02ff
AN
7074
7075static int bpf_object__collect_map_relos(struct bpf_object *obj,
ad23b723 7076 Elf64_Shdr *shdr, Elf_Data *data)
646f02ff 7077{
15728ad3
AN
7078 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7079 int i, j, nrels, new_sz;
063e6881 7080 const struct btf_var_secinfo *vi = NULL;
646f02ff 7081 const struct btf_type *sec, *var, *def;
341ac5ff
HC
7082 struct bpf_map *map = NULL, *targ_map = NULL;
7083 struct bpf_program *targ_prog = NULL;
7084 bool is_prog_array, is_map_in_map;
646f02ff 7085 const struct btf_member *member;
341ac5ff 7086 const char *name, *mname, *type;
646f02ff 7087 unsigned int moff;
ad23b723
AN
7088 Elf64_Sym *sym;
7089 Elf64_Rel *rel;
646f02ff
AN
7090 void *tmp;
7091
7092 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7093 return -EINVAL;
7094 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7095 if (!sec)
7096 return -EINVAL;
7097
646f02ff
AN
7098 nrels = shdr->sh_size / shdr->sh_entsize;
7099 for (i = 0; i < nrels; i++) {
ad23b723
AN
7100 rel = elf_rel_by_idx(data, i);
7101 if (!rel) {
646f02ff
AN
7102 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7103 return -LIBBPF_ERRNO__FORMAT;
7104 }
ad23b723
AN
7105
7106 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7107 if (!sym) {
646f02ff 7108 pr_warn(".maps relo #%d: symbol %zx not found\n",
ad23b723 7109 i, (size_t)ELF64_R_SYM(rel->r_info));
646f02ff
AN
7110 return -LIBBPF_ERRNO__FORMAT;
7111 }
ad23b723 7112 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
646f02ff 7113
ad23b723
AN
7114 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7115 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7116 (size_t)rel->r_offset, sym->st_name, name);
646f02ff
AN
7117
7118 for (j = 0; j < obj->nr_maps; j++) {
7119 map = &obj->maps[j];
7120 if (map->sec_idx != obj->efile.btf_maps_shndx)
7121 continue;
7122
7123 vi = btf_var_secinfos(sec) + map->btf_var_idx;
ad23b723
AN
7124 if (vi->offset <= rel->r_offset &&
7125 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
646f02ff
AN
7126 break;
7127 }
7128 if (j == obj->nr_maps) {
ad23b723
AN
7129 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7130 i, name, (size_t)rel->r_offset);
646f02ff
AN
7131 return -EINVAL;
7132 }
7133
341ac5ff
HC
7134 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7135 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7136 type = is_map_in_map ? "map" : "prog";
7137 if (is_map_in_map) {
7138 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7139 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7140 i, name);
7141 return -LIBBPF_ERRNO__RELOC;
7142 }
7143 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7144 map->def.key_size != sizeof(int)) {
7145 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7146 i, map->name, sizeof(int));
7147 return -EINVAL;
7148 }
7149 targ_map = bpf_object__find_map_by_name(obj, name);
7150 if (!targ_map) {
7151 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7152 i, name);
7153 return -ESRCH;
7154 }
7155 } else if (is_prog_array) {
7156 targ_prog = bpf_object__find_program_by_name(obj, name);
7157 if (!targ_prog) {
7158 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7159 i, name);
7160 return -ESRCH;
7161 }
7162 if (targ_prog->sec_idx != sym->st_shndx ||
7163 targ_prog->sec_insn_off * 8 != sym->st_value ||
7164 prog_is_subprog(obj, targ_prog)) {
7165 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7166 i, name);
7167 return -LIBBPF_ERRNO__RELOC;
7168 }
7169 } else {
646f02ff
AN
7170 return -EINVAL;
7171 }
7172
646f02ff
AN
7173 var = btf__type_by_id(obj->btf, vi->type);
7174 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7175 if (btf_vlen(def) == 0)
7176 return -EINVAL;
7177 member = btf_members(def) + btf_vlen(def) - 1;
7178 mname = btf__name_by_offset(obj->btf, member->name_off);
7179 if (strcmp(mname, "values"))
7180 return -EINVAL;
7181
7182 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
ad23b723 7183 if (rel->r_offset - vi->offset < moff)
646f02ff
AN
7184 return -EINVAL;
7185
ad23b723 7186 moff = rel->r_offset - vi->offset - moff;
15728ad3
AN
7187		/* here we use the BPF pointer size, which is always 64-bit, as
7188		 * we are parsing an ELF that was built for the BPF target
7189 */
7190 if (moff % bpf_ptr_sz)
646f02ff 7191 return -EINVAL;
15728ad3 7192 moff /= bpf_ptr_sz;
646f02ff
AN
7193 if (moff >= map->init_slots_sz) {
7194 new_sz = moff + 1;
029258d7 7195 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
646f02ff
AN
7196 if (!tmp)
7197 return -ENOMEM;
7198 map->init_slots = tmp;
7199 memset(map->init_slots + map->init_slots_sz, 0,
15728ad3 7200 (new_sz - map->init_slots_sz) * host_ptr_sz);
646f02ff
AN
7201 map->init_slots_sz = new_sz;
7202 }
341ac5ff 7203 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
646f02ff 7204
341ac5ff
HC
7205 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7206 i, map->name, moff, type, name);
646f02ff
AN
7207 }
7208
7209 return 0;
7210}
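/* A sketch of the BPF-side declaration these .maps relocations come from,
 * mirroring the selftests' map-in-map pattern (map names are illustrative):
 * each &inner_mapX address taken in .values becomes an ELF relocation that
 * the code above turns into an init_slots[] entry. PROG_ARRAY maps work
 * the same way with &prog references instead of map references.
 */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_map1 SEC(".maps"), inner_map2 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 3);
	__type(key, int);
	__array(values, struct inner_map);
} outer_arr SEC(".maps") = {
	.values = { [0] = &inner_map1, [2] = &inner_map2 },
};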
590a0088 7211
c3c55696
AN
7212static int bpf_object__collect_relos(struct bpf_object *obj)
7213{
7214 int i, err;
34090915 7215
25bbbd7a
AN
7216 for (i = 0; i < obj->efile.sec_cnt; i++) {
7217 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7218 Elf64_Shdr *shdr;
7219 Elf_Data *data;
7220 int idx;
7221
7222 if (sec_desc->sec_type != SEC_RELO)
7223 continue;
7224
7225 shdr = sec_desc->shdr;
7226 data = sec_desc->data;
7227 idx = shdr->sh_info;
34090915 7228
240bf8a5 7229 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
be18010e 7230 pr_warn("internal error at %d\n", __LINE__);
6371ca3b 7231 return -LIBBPF_ERRNO__INTERNAL;
34090915
WN
7232 }
7233
240bf8a5 7234 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
646f02ff 7235 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
c3c55696 7236 else if (idx == obj->efile.btf_maps_shndx)
646f02ff 7237 err = bpf_object__collect_map_relos(obj, shdr, data);
c3c55696
AN
7238 else
7239 err = bpf_object__collect_prog_relos(obj, shdr, data);
34090915 7240 if (err)
6371ca3b 7241 return err;
34090915 7242 }
c3c55696 7243
d0e92887 7244 bpf_object__sort_relos(obj);
34090915
WN
7245 return 0;
7246}
7247
109cea5a
AN
7248static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7249{
9b2f6fec 7250 if (BPF_CLASS(insn->code) == BPF_JMP &&
109cea5a
AN
7251 BPF_OP(insn->code) == BPF_CALL &&
7252 BPF_SRC(insn->code) == BPF_K &&
9b2f6fec
AN
7253 insn->src_reg == 0 &&
7254 insn->dst_reg == 0) {
7255 *func_id = insn->imm;
109cea5a
AN
7256 return true;
7257 }
7258 return false;
7259}
7260
42869d28 7261static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
109cea5a
AN
7262{
7263 struct bpf_insn *insn = prog->insns;
7264 enum bpf_func_id func_id;
7265 int i;
7266
67234743
AS
7267 if (obj->gen_loader)
7268 return 0;
7269
109cea5a
AN
7270 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7271 if (!insn_is_helper_call(insn, &func_id))
7272 continue;
7273
7274 /* on kernels that don't yet support
7275 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7276 * to bpf_probe_read() which works well for old kernels
7277 */
7278 switch (func_id) {
7279 case BPF_FUNC_probe_read_kernel:
7280 case BPF_FUNC_probe_read_user:
9ca1f56a 7281 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
109cea5a
AN
7282 insn->imm = BPF_FUNC_probe_read;
7283 break;
7284 case BPF_FUNC_probe_read_kernel_str:
7285 case BPF_FUNC_probe_read_user_str:
9ca1f56a 7286 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
109cea5a
AN
7287 insn->imm = BPF_FUNC_probe_read_str;
7288 break;
7289 default:
7290 break;
7291 }
7292 }
7293 return 0;
7294}
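/* Sketch: thanks to the pass above, BPF code can use the newer
 * probe-read helpers unconditionally; on kernels lacking them the calls
 * are silently rewritten (the kprobe target and argument use here are
 * illustrative).
 */
SEC("kprobe/do_nanosleep")
int read_example(struct pt_regs *ctx)
{
	__u64 v;

	/* rewritten to bpf_probe_read() by bpf_object__sanitize_prog()
	 * when FEAT_PROBE_READ_KERN is unavailable
	 */
	bpf_probe_read_kernel(&v, sizeof(v), (void *)PT_REGS_PARM1(ctx));
	return 0;
}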
7295
15ea31fa
AN
7296static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7297 int *btf_obj_fd, int *btf_type_id);
12d9466d 7298
4fa5bcfe
AN
7299/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7300static int libbpf_prepare_prog_load(struct bpf_program *prog,
7301 struct bpf_prog_load_opts *opts, long cookie)
12d9466d 7302{
15ea31fa
AN
7303 enum sec_def_flags def = cookie;
7304
12d9466d 7305 /* old kernels might not support specifying expected_attach_type */
15ea31fa 7306 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
d10ef2b8 7307 opts->expected_attach_type = 0;
12d9466d 7308
15ea31fa 7309 if (def & SEC_SLEEPABLE)
d10ef2b8 7310 opts->prog_flags |= BPF_F_SLEEPABLE;
12d9466d 7311
082c4bfb
LB
7312 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7313 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7314
5902da6d
JO
7315 /* special check for usdt to use uprobe_multi link */
7316 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7317 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7318
cc7d8f2c 7319 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
12d9466d 7320 int btf_obj_fd = 0, btf_type_id = 0, err;
15ea31fa 7321 const char *attach_name;
12d9466d 7322
cc7d8f2c
AN
7323 attach_name = strchr(prog->sec_name, '/');
7324 if (!attach_name) {
7325 /* if BPF program is annotated with just SEC("fentry")
7326 * (or similar) without declaratively specifying
7327 * target, then it is expected that target will be
7328 * specified with bpf_program__set_attach_target() at
7329 * runtime before BPF object load step. If not, then
7330 * there is nothing to load into the kernel as BPF
7331 * verifier won't be able to validate BPF program
7332 * correctness anyways.
7333 */
7334 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7335 prog->name);
7336 return -EINVAL;
7337 }
7338 attach_name++; /* skip over / */
7339
15ea31fa 7340 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
12d9466d
AN
7341 if (err)
7342 return err;
7343
7344 /* cache resolved BTF FD and BTF type ID in the prog */
7345 prog->attach_btf_obj_fd = btf_obj_fd;
7346 prog->attach_btf_id = btf_type_id;
7347
7348 /* but by now libbpf common logic is not utilizing
7349	 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
d10ef2b8
AN
7350 * this callback is called after opts were populated by
7351 * libbpf, so this callback has to update opts explicitly here
12d9466d 7352 */
d10ef2b8
AN
7353 opts->attach_btf_obj_fd = btf_obj_fd;
7354 opts->attach_btf_id = btf_type_id;
12d9466d
AN
7355 }
7356 return 0;
7357}
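/* Sketch of the runtime-target path the warning above refers to: a bare
 * SEC("fentry") program needs an explicit target set before load (the
 * object file and program names are illustrative).
 */
struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
struct bpf_program *p = bpf_object__find_program_by_name(obj, "handler");

bpf_program__set_attach_target(p, 0 /* attach to vmlinux */, "tcp_v4_connect");
bpf_object__load(obj);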
7358
9fdc4273
AN
7359static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7360
cf90a20d
AN
7361static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7362 struct bpf_insn *insns, int insns_cnt,
7363 const char *license, __u32 kern_version, int *prog_fd)
55cffde2 7364{
d10ef2b8
AN
7365 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7366 const char *prog_name = NULL;
1ce6a9fc 7367 char *cp, errmsg[STRERR_BUFSIZE];
8395f320 7368 size_t log_buf_size = 0;
b3ce9079 7369 char *log_buf = NULL, *tmp;
b3ce9079
AN
7370 bool own_log_buf = true;
7371 __u32 log_level = prog->log_level;
9bf48fa1 7372 int ret, err;
55cffde2 7373
80b2b5c3
AM
7374 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7375 /*
7376 * The program type must be set. Most likely we couldn't find a proper
7377 * section definition at load time, and thus we didn't infer the type.
7378 */
7379 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7380 prog->name, prog->sec_name);
7381 return -EINVAL;
7382 }
7383
fba01a06
AN
7384 if (!insns || !insns_cnt)
7385 return -EINVAL;
7386
25bbbd7a 7387 if (kernel_supports(obj, FEAT_PROG_NAME))
d10ef2b8 7388 prog_name = prog->name;
12d9466d
AN
7389 load_attr.attach_prog_fd = prog->attach_prog_fd;
7390 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6aef10a4
AN
7391 load_attr.attach_btf_id = prog->attach_btf_id;
7392 load_attr.kern_version = kern_version;
7393 load_attr.prog_ifindex = prog->prog_ifindex;
7394
0f0e55d8 7395 /* specify func_info/line_info only if kernel supports them */
9bf48fa1
QM
7396 if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7397 load_attr.prog_btf_fd = btf__fd(obj->btf);
0f0e55d8
AN
7398 load_attr.func_info = prog->func_info;
7399 load_attr.func_info_rec_size = prog->func_info_rec_size;
7400 load_attr.func_info_cnt = prog->func_info_cnt;
7401 load_attr.line_info = prog->line_info;
7402 load_attr.line_info_rec_size = prog->line_info_rec_size;
7403 load_attr.line_info_cnt = prog->line_info_cnt;
7404 }
b3ce9079 7405 load_attr.log_level = log_level;
04656198 7406 load_attr.prog_flags = prog->prog_flags;
25bbbd7a 7407 load_attr.fd_array = obj->fd_array;
55cffde2 7408
6b434b61
AN
7409 load_attr.token_fd = obj->token_fd;
7410 if (obj->token_fd)
7411 load_attr.prog_flags |= BPF_F_TOKEN_FD;
7412
12d9466d 7413 /* adjust load_attr if sec_def provides custom preload callback */
4fa5bcfe
AN
7414 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7415 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
12d9466d
AN
7416 if (err < 0) {
7417 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7418 prog->name, err);
7419 return err;
7420 }
b63b3c49
JO
7421 insns = prog->insns;
7422 insns_cnt = prog->insns_cnt;
12d9466d
AN
7423 }
7424
5902da6d
JO
7425 /* allow prog_prepare_load_fn to change expected_attach_type */
7426 load_attr.expected_attach_type = prog->expected_attach_type;
7427
25bbbd7a 7428 if (obj->gen_loader) {
d10ef2b8
AN
7429 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7430 license, insns, insns_cnt, &load_attr,
25bbbd7a 7431 prog - obj->programs);
be2f2d16 7432 *prog_fd = -1;
67234743
AS
7433 return 0;
7434 }
8395f320 7435
b3ce9079 7436retry_load:
bb412cf1 7437 /* if log_level is zero, we don't request logs initially even if
b3ce9079
AN
7438 * custom log_buf is specified; if the program load fails, then we'll
7439 * bump log_level to 1 and use either custom log_buf or we'll allocate
7440 * our own and retry the load to get details on what failed
7441 */
7442 if (log_level) {
7443 if (prog->log_buf) {
7444 log_buf = prog->log_buf;
7445 log_buf_size = prog->log_size;
7446 own_log_buf = false;
7447 } else if (obj->log_buf) {
7448 log_buf = obj->log_buf;
7449 log_buf_size = obj->log_size;
7450 own_log_buf = false;
7451 } else {
7452 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7453 tmp = realloc(log_buf, log_buf_size);
7454 if (!tmp) {
7455 ret = -ENOMEM;
7456 goto out;
7457 }
7458 log_buf = tmp;
7459 log_buf[0] = '\0';
7460 own_log_buf = true;
7461 }
8395f320 7462 }
55cffde2 7463
6aef10a4 7464 load_attr.log_buf = log_buf;
d10ef2b8 7465 load_attr.log_size = log_buf_size;
b3ce9079 7466 load_attr.log_level = log_level;
55cffde2 7467
b3ce9079 7468 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
55cffde2 7469 if (ret >= 0) {
b3ce9079 7470 if (log_level && own_log_buf) {
ad9a7f96
AN
7471 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7472 prog->name, log_buf);
7473 }
5d23328d 7474
25bbbd7a
AN
7475 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7476 struct bpf_map *map;
7477 int i;
7478
7479 for (i = 0; i < obj->nr_maps; i++) {
7480 map = &prog->obj->maps[i];
7481 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7482 continue;
5d23328d 7483
f08c18e0 7484 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
25bbbd7a 7485 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ad9a7f96
AN
7486 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7487 prog->name, map->real_name, cp);
25bbbd7a
AN
7488 /* Don't fail hard if can't bind rodata. */
7489 }
5d23328d
YZ
7490 }
7491 }
7492
be2f2d16 7493 *prog_fd = ret;
55cffde2
WN
7494 ret = 0;
7495 goto out;
7496 }
7497
b3ce9079
AN
7498 if (log_level == 0) {
7499 log_level = 1;
da11b417
AS
7500 goto retry_load;
7501 }
b3ce9079
AN
7502 /* On ENOSPC, increase log buffer size and retry, unless custom
7503 * log_buf is specified.
7504 * Be careful to not overflow u32, though. Kernel's log buf size limit
7505 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7506 * multiply by 2 unless we are sure we'll fit within 32 bits.
7507 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7508 */
7509 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7510 goto retry_load;
2eda2145
AN
7511
7512 ret = -errno;
9fdc4273
AN
7513
7514 /* post-process verifier log to improve error descriptions */
7515 fixup_verifier_log(prog, log_buf, log_buf_size);
7516
24d6a808 7517 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ad9a7f96 7518 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
dc3a2d25 7519 pr_perm_msg(ret);
55cffde2 7520
b3ce9079 7521 if (own_log_buf && log_buf && log_buf[0] != '\0') {
ad9a7f96
AN
7522 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7523 prog->name, log_buf);
7524 }
55cffde2
WN
7525
7526out:
b3ce9079
AN
7527 if (own_log_buf)
7528 free(log_buf);
55cffde2
WN
7529 return ret;
7530}
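/* Sketch: supplying a per-program verifier log buffer from user code; the
 * retry logic above then uses it as-is instead of growing its own
 * allocation (buffer size and log level are illustrative).
 */
static char vlog[1024 * 1024];

bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
bpf_program__set_log_level(prog, 1); /* request the log on the first attempt */
err = bpf_object__load(obj);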
7531
9fdc4273
AN
7532static char *find_prev_line(char *buf, char *cur)
7533{
7534 char *p;
7535
7536 if (cur == buf) /* end of a log buf */
7537 return NULL;
7538
7539 p = cur - 1;
7540 while (p - 1 >= buf && *(p - 1) != '\n')
7541 p--;
7542
7543 return p;
7544}
7545
7546static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7547 char *orig, size_t orig_sz, const char *patch)
7548{
7549 /* size of the remaining log content to the right from the to-be-replaced part */
7550 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7551 size_t patch_sz = strlen(patch);
7552
7553 if (patch_sz != orig_sz) {
7554 /* If patch line(s) are longer than original piece of verifier log,
7555 * shift log contents by (patch_sz - orig_sz) bytes to the right
7556 * starting from after to-be-replaced part of the log.
7557 *
7558 * If patch line(s) are shorter than original piece of verifier log,
7559 * shift log contents by (orig_sz - patch_sz) bytes to the left
7560 * starting from after to-be-replaced part of the log
7561 *
7562 * We need to be careful about not overflowing available
7563 * buf_sz capacity. If that's the case, we'll truncate the end
7564 * of the original log, as necessary.
7565 */
7566 if (patch_sz > orig_sz) {
7567 if (orig + patch_sz >= buf + buf_sz) {
7568 /* patch is big enough to cover remaining space completely */
7569 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7570 rem_sz = 0;
7571 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7572 /* patch causes part of remaining log to be truncated */
7573 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7574 }
7575 }
7576 /* shift remaining log to the right by calculated amount */
7577 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7578 }
7579
7580 memcpy(orig, patch, patch_sz);
7581}
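/* A worked example of patch_log() semantics (values illustrative):
 * replacing a 6-byte piece with a longer patch shifts the tail right,
 * truncating at buf_sz if necessary.
 */
char buf[32] = "0: ok\n1: bad\n";
size_t log_sz = strlen(buf) + 1;
char *orig = strstr(buf, "1: bad");

patch_log(buf, sizeof(buf), log_sz, orig, strlen("1: bad"), "1: <replaced>");
/* buf now contains "0: ok\n1: <replaced>\n" */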
7582
7583static void fixup_log_failed_core_relo(struct bpf_program *prog,
7584 char *buf, size_t buf_sz, size_t log_sz,
7585 char *line1, char *line2, char *line3)
7586{
7587 /* Expected log for failed and not properly guarded CO-RE relocation:
7588 * line1 -> 123: (85) call unknown#195896080
7589 * line2 -> invalid func unknown#195896080
7590 * line3 -> <anything else or end of buffer>
7591 *
7592 * "123" is the index of the instruction that was poisoned. We extract
7593 * instruction index to find corresponding CO-RE relocation and
7594 * replace this part of the log with more relevant information about
7595 * failed CO-RE relocation.
7596 */
7597 const struct bpf_core_relo *relo;
7598 struct bpf_core_spec spec;
7599 char patch[512], spec_buf[256];
b198881d 7600 int insn_idx, err, spec_len;
9fdc4273
AN
7601
7602 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7603 return;
7604
7605 relo = find_relo_core(prog, insn_idx);
7606 if (!relo)
7607 return;
7608
7609 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7610 if (err)
7611 return;
7612
b198881d 7613 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
9fdc4273
AN
7614 snprintf(patch, sizeof(patch),
7615 "%d: <invalid CO-RE relocation>\n"
b198881d
AN
7616 "failed to resolve CO-RE relocation %s%s\n",
7617 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
9fdc4273
AN
7618
7619 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7620}
7621
ec41817b
AN
7622static void fixup_log_missing_map_load(struct bpf_program *prog,
7623 char *buf, size_t buf_sz, size_t log_sz,
7624 char *line1, char *line2, char *line3)
7625{
3055ddd6 7626 /* Expected log for failed and not properly guarded map reference:
ec41817b
AN
7627 * line1 -> 123: (85) call unknown#2001000345
7628 * line2 -> invalid func unknown#2001000345
7629 * line3 -> <anything else or end of buffer>
7630 *
7631 * "123" is the index of the instruction that was poisoned.
3055ddd6 7632 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
ec41817b
AN
7633 */
7634 struct bpf_object *obj = prog->obj;
7635 const struct bpf_map *map;
7636 int insn_idx, map_idx;
7637 char patch[128];
7638
7639 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7640 return;
7641
3055ddd6 7642 map_idx -= POISON_LDIMM64_MAP_BASE;
ec41817b
AN
7643 if (map_idx < 0 || map_idx >= obj->nr_maps)
7644 return;
7645 map = &obj->maps[map_idx];
7646
7647 snprintf(patch, sizeof(patch),
7648 "%d: <invalid BPF map reference>\n"
7649 "BPF map '%s' is referenced but wasn't created\n",
7650 insn_idx, map->name);
7651
7652 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7653}
7654
05b6f766
AN
7655static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7656 char *buf, size_t buf_sz, size_t log_sz,
7657 char *line1, char *line2, char *line3)
7658{
7659 /* Expected log for failed and not properly guarded kfunc call:
7660 * line1 -> 123: (85) call unknown#2002000345
7661 * line2 -> invalid func unknown#2002000345
7662 * line3 -> <anything else or end of buffer>
7663 *
7664 * "123" is the index of the instruction that was poisoned.
7665 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7666 */
7667 struct bpf_object *obj = prog->obj;
7668 const struct extern_desc *ext;
7669 int insn_idx, ext_idx;
7670 char patch[128];
7671
7672 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7673 return;
7674
7675 ext_idx -= POISON_CALL_KFUNC_BASE;
7676 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7677 return;
7678 ext = &obj->externs[ext_idx];
7679
7680 snprintf(patch, sizeof(patch),
7681 "%d: <invalid kfunc call>\n"
7682 "kfunc '%s' is referenced but wasn't resolved\n",
7683 insn_idx, ext->name);
7684
7685 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7686}
7687
9fdc4273
AN
7688static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7689{
7690 /* look for familiar error patterns in last N lines of the log */
7691 const size_t max_last_line_cnt = 10;
7692 char *prev_line, *cur_line, *next_line;
7693 size_t log_sz;
7694 int i;
7695
7696 if (!buf)
7697 return;
7698
7699 log_sz = strlen(buf) + 1;
7700 next_line = buf + log_sz - 1;
7701
7702 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7703 cur_line = find_prev_line(buf, next_line);
7704 if (!cur_line)
7705 return;
7706
9fdc4273
AN
7707 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7708 prev_line = find_prev_line(buf, cur_line);
7709 if (!prev_line)
7710 continue;
7711
3055ddd6 7712 /* failed CO-RE relocation case */
9fdc4273
AN
7713 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7714 prev_line, cur_line, next_line);
7715 return;
3055ddd6 7716 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
ec41817b
AN
7717 prev_line = find_prev_line(buf, cur_line);
7718 if (!prev_line)
7719 continue;
7720
3055ddd6 7721 /* reference to uncreated BPF map */
ec41817b
AN
7722 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7723 prev_line, cur_line, next_line);
7724 return;
05b6f766
AN
7725 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7726 prev_line = find_prev_line(buf, cur_line);
7727 if (!prev_line)
7728 continue;
7729
7730 /* reference to unresolved kfunc */
7731 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7732 prev_line, cur_line, next_line);
7733 return;
9fdc4273
AN
7734 }
7735 }
7736}
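/* Illustration (derived from the format strings above) of what this
 * post-processing does to the tail of a failed load's log:
 *
 *   before:  123: (85) call unknown#195896080
 *            invalid func unknown#195896080
 *
 *   after:   123: <invalid CO-RE relocation>
 *            failed to resolve CO-RE relocation <spec ...>
 */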
7737
d0e92887 7738static int bpf_program_record_relos(struct bpf_program *prog)
67234743
AS
7739{
7740 struct bpf_object *obj = prog->obj;
7741 int i;
7742
7743 for (i = 0; i < prog->nr_reloc; i++) {
7744 struct reloc_desc *relo = &prog->reloc_desc[i];
3055ddd6 7745 struct extern_desc *ext = &obj->externs[relo->ext_idx];
708cdc57 7746 int kind;
67234743
AS
7747
7748 switch (relo->type) {
a18f7214 7749 case RELO_EXTERN_LD64:
67234743
AS
7750 if (ext->type != EXT_KSYM)
7751 continue;
708cdc57
AS
7752 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7753 BTF_KIND_VAR : BTF_KIND_FUNC;
c24941cd
KKD
7754 bpf_gen__record_extern(obj->gen_loader, ext->name,
7755 ext->is_weak, !ext->ksym.type_id,
708cdc57 7756 true, kind, relo->insn_idx);
67234743 7757 break;
a18f7214 7758 case RELO_EXTERN_CALL:
c24941cd 7759 bpf_gen__record_extern(obj->gen_loader, ext->name,
708cdc57 7760 ext->is_weak, false, false, BTF_KIND_FUNC,
67234743
AS
7761 relo->insn_idx);
7762 break;
d0e92887
AS
7763 case RELO_CORE: {
7764 struct bpf_core_relo cr = {
7765 .insn_off = relo->insn_idx * 8,
7766 .type_id = relo->core_relo->type_id,
7767 .access_str_off = relo->core_relo->access_str_off,
7768 .kind = relo->core_relo->kind,
7769 };
7770
7771 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7772 break;
7773 }
67234743
AS
7774 default:
7775 continue;
7776 }
7777 }
7778 return 0;
7779}
7780
55cffde2 7781static int
60276f98 7782bpf_object__load_progs(struct bpf_object *obj, int log_level)
55cffde2 7783{
d9297581 7784 struct bpf_program *prog;
55cffde2
WN
7785 size_t i;
7786 int err;
7787
109cea5a
AN
7788 for (i = 0; i < obj->nr_programs; i++) {
7789 prog = &obj->programs[i];
7790 err = bpf_object__sanitize_prog(obj, prog);
7791 if (err)
7792 return err;
7793 }
7794
55cffde2 7795 for (i = 0; i < obj->nr_programs; i++) {
d9297581 7796 prog = &obj->programs[i];
c3c55696 7797 if (prog_is_subprog(obj, prog))
48cca7e4 7798 continue;
a3820c48 7799 if (!prog->autoload) {
9c0f8cbd 7800 pr_debug("prog '%s': skipped loading\n", prog->name);
d9297581
AN
7801 continue;
7802 }
7803 prog->log_level |= log_level;
cf90a20d
AN
7804
7805 if (obj->gen_loader)
7806 bpf_program_record_relos(prog);
7807
7808 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7809 obj->license, obj->kern_version, &prog->fd);
7810 if (err) {
7811 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
55cffde2 7812 return err;
cf90a20d 7813 }
55cffde2 7814 }
185cfe83
AN
7815
7816 bpf_object__free_relocs(obj);
55cffde2
WN
7817 return 0;
7818}
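/* Sketch: two ways a program ends up skipped by the autoload check above;
 * SEC("?...") marks it non-autoloaded declaratively, or it can be toggled
 * at runtime before load (names are illustrative).
 */
SEC("?tc")
int optional_prog(struct __sk_buff *ctx) { return 0; }

/* ...or, from userspace, before bpf_object__load(): */
bpf_program__set_autoload(prog, false);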
7819
25498a19
AN
7820static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7821
91b4d1d1
AN
7822static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7823{
7824 struct bpf_program *prog;
12d9466d 7825 int err;
91b4d1d1
AN
7826
7827 bpf_object__for_each_program(prog, obj) {
7828 prog->sec_def = find_sec_def(prog->sec_name);
7829 if (!prog->sec_def) {
7830 /* couldn't guess, but user might manually specify */
7831 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7832 prog->name, prog->sec_name);
7833 continue;
7834 }
7835
df286716
GS
7836 prog->type = prog->sec_def->prog_type;
7837 prog->expected_attach_type = prog->sec_def->expected_attach_type;
91b4d1d1 7838
12d9466d
AN
7839 /* sec_def can have custom callback which should be called
7840 * after bpf_program is initialized to adjust its properties
7841 */
4fa5bcfe
AN
7842 if (prog->sec_def->prog_setup_fn) {
7843 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
12d9466d
AN
7844 if (err < 0) {
7845 pr_warn("prog '%s': failed to initialize: %d\n",
7846 prog->name, err);
7847 return err;
7848 }
7849 }
91b4d1d1
AN
7850 }
7851
7852 return 0;
7853}
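/* Sketch: for an ELF section name find_sec_def() doesn't recognize, the
 * program keeps BPF_PROG_TYPE_UNSPEC and its type must be set manually
 * before load (the chosen type here is illustrative):
 */
bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
bpf_program__set_expected_attach_type(prog, 0);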
7854
ad9a7f96
AN
7855static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7856 const struct bpf_object_open_opts *opts)
1a5e3fb1 7857{
6b434b61 7858 const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
1a5e3fb1 7859 struct bpf_object *obj;
291ee02b 7860 char tmp_name[64];
d17aff80 7861 int err;
e0e3ea88
AN
7862 char *log_buf;
7863 size_t log_size;
7864 __u32 log_level;
1a5e3fb1
WN
7865
7866 if (elf_version(EV_CURRENT) == EV_NONE) {
be18010e
KW
7867 pr_warn("failed to init libelf for %s\n",
7868 path ? : "(mem buf)");
6371ca3b 7869 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1a5e3fb1
WN
7870 }
7871
291ee02b
AN
7872 if (!OPTS_VALID(opts, bpf_object_open_opts))
7873 return ERR_PTR(-EINVAL);
7874
1aace10f 7875 obj_name = OPTS_GET(opts, object_name, NULL);
291ee02b
AN
7876 if (obj_buf) {
7877 if (!obj_name) {
7878 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7879 (unsigned long)obj_buf,
7880 (unsigned long)obj_buf_sz);
7881 obj_name = tmp_name;
7882 }
7883 path = obj_name;
7884 pr_debug("loading object '%s' from buffer\n", obj_name);
7885 }
7886
e0e3ea88
AN
7887 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7888 log_size = OPTS_GET(opts, kernel_log_size, 0);
7889 log_level = OPTS_GET(opts, kernel_log_level, 0);
7890 if (log_size > UINT_MAX)
7891 return ERR_PTR(-EINVAL);
7892 if (log_size && !log_buf)
7893 return ERR_PTR(-EINVAL);
7894
6b434b61 7895 token_path = OPTS_GET(opts, bpf_token_path, NULL);
cac270ad
AN
7896 /* if user didn't specify bpf_token_path explicitly, check if
7897 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
7898 * option
7899 */
7900 if (!token_path)
7901 token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
6b434b61
AN
7902 if (token_path && strlen(token_path) >= PATH_MAX)
7903 return ERR_PTR(-ENAMETOOLONG);
7904
2ce8450e 7905 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6371ca3b
WN
7906 if (IS_ERR(obj))
7907 return obj;
1a5e3fb1 7908
e0e3ea88
AN
7909 obj->log_buf = log_buf;
7910 obj->log_size = log_size;
7911 obj->log_level = log_level;
7912
6b434b61
AN
7913 if (token_path) {
7914 obj->token_path = strdup(token_path);
7915 if (!obj->token_path) {
7916 err = -ENOMEM;
7917 goto out;
7918 }
7919 }
7920
1373ff59
SC
7921 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7922 if (btf_tmp_path) {
7923 if (strlen(btf_tmp_path) >= PATH_MAX) {
7924 err = -ENAMETOOLONG;
7925 goto out;
7926 }
7927 obj->btf_custom_path = strdup(btf_tmp_path);
7928 if (!obj->btf_custom_path) {
7929 err = -ENOMEM;
7930 goto out;
7931 }
7932 }
7933
8601fd42
AN
7934 kconfig = OPTS_GET(opts, kconfig, NULL);
7935 if (kconfig) {
7936 obj->kconfig = strdup(kconfig);
18353c87
SC
7937 if (!obj->kconfig) {
7938 err = -ENOMEM;
7939 goto out;
7940 }
166750bc 7941 }
291ee02b 7942
0d13bfce
AN
7943 err = bpf_object__elf_init(obj);
7944 err = err ? : bpf_object__check_endianness(obj);
7945 err = err ? : bpf_object__elf_collect(obj);
166750bc 7946 err = err ? : bpf_object__collect_externs(obj);
f33f742d 7947 err = err ? : bpf_object_fixup_btf(obj);
0d13bfce 7948 err = err ? : bpf_object__init_maps(obj, opts);
91b4d1d1 7949 err = err ? : bpf_object_init_progs(obj, opts);
c3c55696 7950 err = err ? : bpf_object__collect_relos(obj);
0d13bfce
AN
7951 if (err)
7952 goto out;
dd4436bb 7953
91b4d1d1 7954 bpf_object__elf_finish(obj);
dd4436bb 7955
1a5e3fb1
WN
7956 return obj;
7957out:
7958 bpf_object__close(obj);
6371ca3b 7959 return ERR_PTR(err);
1a5e3fb1
WN
7960}
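/* Sketch: the open-time options consumed above, as set from user code
 * (all values are illustrative).
 */
LIBBPF_OPTS(bpf_object_open_opts, opts,
	.object_name = "my_obj",
	.kconfig = "CONFIG_MY_FEATURE=y",
	.kernel_log_level = 1,
);
struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);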
7961
2ce8450e 7962struct bpf_object *
01af3bf0 7963bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
2ce8450e 7964{
2ce8450e 7965 if (!path)
e9fc3ce9 7966 return libbpf_err_ptr(-EINVAL);
2ce8450e
AN
7967
7968 pr_debug("loading %s\n", path);
7969
ad9a7f96 7970 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
2ce8450e
AN
7971}
7972
146bf811
AN
7973struct bpf_object *bpf_object__open(const char *path)
7974{
7975 return bpf_object__open_file(path, NULL);
7976}
7977
2ce8450e
AN
7978struct bpf_object *
7979bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
01af3bf0 7980 const struct bpf_object_open_opts *opts)
6c956392 7981{
2ce8450e 7982 if (!obj_buf || obj_buf_sz == 0)
e9fc3ce9 7983 return libbpf_err_ptr(-EINVAL);
6c956392 7984
ad9a7f96 7985 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
2ce8450e
AN
7986}
7987
4a404a7e 7988static int bpf_object_unload(struct bpf_object *obj)
52d3352e
WN
7989{
7990 size_t i;
7991
7992 if (!obj)
e9fc3ce9 7993 return libbpf_err(-EINVAL);
52d3352e 7994
590a0088 7995 for (i = 0; i < obj->nr_maps; i++) {
9d759a9b 7996 zclose(obj->maps[i].fd);
590a0088
MKL
7997 if (obj->maps[i].st_ops)
7998 zfree(&obj->maps[i].st_ops->kern_vdata);
7999 }
52d3352e 8000
55cffde2
WN
8001 for (i = 0; i < obj->nr_programs; i++)
8002 bpf_program__unload(&obj->programs[i]);
8003
52d3352e
WN
8004 return 0;
8005}
8006
0d13bfce
AN
8007static int bpf_object__sanitize_maps(struct bpf_object *obj)
8008{
8009 struct bpf_map *m;
8010
8011 bpf_object__for_each_map(m, obj) {
8012 if (!bpf_map__is_internal(m))
8013 continue;
9ca1f56a 8014 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
6920b086 8015 m->def.map_flags &= ~BPF_F_MMAPABLE;
0d13bfce
AN
8016 }
8017
8018 return 0;
8019}
8020
ad2b0528
YS
8021typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
8022 const char *sym_name, void *ctx);
8023
8024static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
1c0c7074
AN
8025{
8026 char sym_type, sym_name[500];
8027 unsigned long long sym_addr;
1c0c7074
AN
8028 int ret, err = 0;
8029 FILE *f;
8030
59842c54 8031 f = fopen("/proc/kallsyms", "re");
1c0c7074
AN
8032 if (!f) {
8033 err = -errno;
8034 pr_warn("failed to open /proc/kallsyms: %d\n", err);
8035 return err;
8036 }
8037
8038 while (true) {
8039 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
8040 &sym_addr, &sym_type, sym_name);
8041 if (ret == EOF && feof(f))
8042 break;
8043 if (ret != 3) {
135c783f 8044 pr_warn("failed to read kallsyms entry: %d\n", ret);
1c0c7074 8045 err = -EINVAL;
85153ac0 8046 break;
1c0c7074
AN
8047 }
8048
85153ac0
JO
8049 err = cb(sym_addr, sym_type, sym_name, ctx);
8050 if (err)
8051 break;
1c0c7074
AN
8052 }
8053
1c0c7074
AN
8054 fclose(f);
8055 return err;
8056}
8057
85153ac0
JO
8058static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8059 const char *sym_name, void *ctx)
8060{
8061 struct bpf_object *obj = ctx;
8062 const struct btf_type *t;
8063 struct extern_desc *ext;
c56e5977 8064 char *res;
85153ac0 8065
c56e5977
YS
8066 res = strstr(sym_name, ".llvm.");
8067 if (sym_type == 'd' && res)
8068 ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
8069 else
8070 ext = find_extern_by_name(obj, sym_name);
85153ac0
JO
8071 if (!ext || ext->type != EXT_KSYM)
8072 return 0;
8073
8074 t = btf__type_by_id(obj->btf, ext->btf_id);
8075 if (!btf_is_var(t))
8076 return 0;
8077
8078 if (ext->is_set && ext->ksym.addr != sym_addr) {
55d00c37 8079 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
85153ac0
JO
8080 sym_name, ext->ksym.addr, sym_addr);
8081 return -EINVAL;
8082 }
8083 if (!ext->is_set) {
8084 ext->is_set = true;
8085 ext->ksym.addr = sym_addr;
55d00c37 8086 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
85153ac0
JO
8087 }
8088 return 0;
8089}
8090
8091static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8092{
8093 return libbpf_kallsyms_parse(kallsyms_cb, obj);
8094}
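/* Sketch of the BPF-side extern this kallsyms pass resolves: a typeless
 * (address-only) __ksym, filled from /proc/kallsyms; the symbol and
 * section names are illustrative.
 */
extern const void socket_file_ops __ksym;

SEC("raw_tp/sys_enter")
int addr_example(const void *ctx)
{
	__u64 addr = (__u64)&socket_file_ops; /* resolved address */

	return addr != 0;
}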
8095
774e132e
MKL
8096static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8097 __u16 kind, struct btf **res_btf,
9dbe6015 8098 struct module_btf **res_mod_btf)
d370bbe1 8099{
9dbe6015 8100 struct module_btf *mod_btf;
284d2587 8101 struct btf *btf;
9dbe6015 8102 int i, id, err;
d370bbe1 8103
933d1aa3 8104 btf = obj->btf_vmlinux;
9dbe6015 8105 mod_btf = NULL;
774e132e
MKL
8106 id = btf__find_by_name_kind(btf, ksym_name, kind);
8107
933d1aa3
MKL
8108 if (id == -ENOENT) {
8109 err = load_module_btfs(obj);
8110 if (err)
8111 return err;
d370bbe1 8112
933d1aa3 8113 for (i = 0; i < obj->btf_module_cnt; i++) {
9dbe6015
KKD
8114 /* we assume module_btf's BTF FD is always >0 */
8115 mod_btf = &obj->btf_modules[i];
8116 btf = mod_btf->btf;
8117 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
933d1aa3
MKL
8118 if (id != -ENOENT)
8119 break;
8120 }
8121 }
2211c825 8122 if (id <= 0)
933d1aa3 8123 return -ESRCH;
d370bbe1 8124
774e132e 8125 *res_btf = btf;
9dbe6015 8126 *res_mod_btf = mod_btf;
774e132e
MKL
8127 return id;
8128}
8129
8130static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8131 struct extern_desc *ext)
8132{
8133 const struct btf_type *targ_var, *targ_type;
8134 __u32 targ_type_id, local_type_id;
9dbe6015 8135 struct module_btf *mod_btf = NULL;
774e132e 8136 const char *targ_var_name;
774e132e 8137 struct btf *btf = NULL;
9dbe6015 8138 int id, err;
774e132e 8139
9dbe6015 8140 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
466b2e13
KKD
8141 if (id < 0) {
8142 if (id == -ESRCH && ext->is_weak)
8143 return 0;
2211c825
HL
8144 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8145 ext->name);
774e132e 8146 return id;
2211c825 8147 }
774e132e 8148
933d1aa3
MKL
8149 /* find local type_id */
8150 local_type_id = ext->ksym.type_id;
284d2587 8151
933d1aa3
MKL
8152 /* find target type_id */
8153 targ_var = btf__type_by_id(btf, id);
8154 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8155 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
d370bbe1 8156
933d1aa3
MKL
8157 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8158 btf, targ_type_id);
8159 if (err <= 0) {
8160 const struct btf_type *local_type;
8161 const char *targ_name, *local_name;
d370bbe1 8162
933d1aa3
MKL
8163 local_type = btf__type_by_id(obj->btf, local_type_id);
8164 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8165 targ_name = btf__name_by_offset(btf, targ_type->name_off);
d370bbe1 8166
933d1aa3
MKL
8167 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8168 ext->name, local_type_id,
8169 btf_kind_str(local_type), local_name, targ_type_id,
8170 btf_kind_str(targ_type), targ_name);
8171 return -EINVAL;
8172 }
d370bbe1 8173
933d1aa3 8174 ext->is_set = true;
9dbe6015 8175 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
933d1aa3
MKL
8176 ext->ksym.kernel_btf_id = id;
8177 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8178 ext->name, id, btf_kind_str(targ_var), targ_var_name);
d370bbe1 8179
933d1aa3
MKL
8180 return 0;
8181}
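/* Sketch: a typed ksym variable, resolved against vmlinux/module BTF by
 * the function above; the local BTF type must be compatible with the
 * kernel's ("runqueues" is illustrative, __weak tolerates absence).
 */
extern const struct rq runqueues __ksym __weak; /* per-CPU kernel var */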
d370bbe1 8182
5bd022ec
MKL
8183static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8184 struct extern_desc *ext)
8185{
8186 int local_func_proto_id, kfunc_proto_id, kfunc_id;
9dbe6015 8187 struct module_btf *mod_btf = NULL;
5bd022ec
MKL
8188 const struct btf_type *kern_func;
8189 struct btf *kern_btf = NULL;
9dbe6015 8190 int ret;
5bd022ec
MKL
8191
8192 local_func_proto_id = ext->ksym.type_id;
8193
5964a223
DM
8194 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8195 &mod_btf);
5bd022ec 8196 if (kfunc_id < 0) {
466b2e13
KKD
8197 if (kfunc_id == -ESRCH && ext->is_weak)
8198 return 0;
8199 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
5bd022ec
MKL
8200 ext->name);
8201 return kfunc_id;
8202 }
8203
5bd022ec
MKL
8204 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8205 kfunc_proto_id = kern_func->type;
8206
8207 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8208 kern_btf, kfunc_proto_id);
8209 if (ret <= 0) {
5964a223
DM
8210 if (ext->is_weak)
8211 return 0;
8212
f709160d
AN
8213 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8214 ext->name, local_func_proto_id,
8215 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
5bd022ec
MKL
8216 return -EINVAL;
8217 }
8218
9dbe6015
KKD
8219 /* set index for module BTF fd in fd_array, if unset */
8220 if (mod_btf && !mod_btf->fd_array_idx) {
8221 /* insn->off is s16 */
8222 if (obj->fd_array_cnt == INT16_MAX) {
8223 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8224 ext->name, mod_btf->fd_array_idx);
8225 return -E2BIG;
8226 }
8227 /* Cannot use index 0 for module BTF fd */
8228 if (!obj->fd_array_cnt)
8229 obj->fd_array_cnt = 1;
8230
8231 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8232 obj->fd_array_cnt + 1);
8233 if (ret)
8234 return ret;
8235 mod_btf->fd_array_idx = obj->fd_array_cnt;
8236 /* we assume module BTF FD is always >0 */
8237 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8238 }
8239
5bd022ec 8240 ext->is_set = true;
5bd022ec 8241 ext->ksym.kernel_btf_id = kfunc_id;
9dbe6015 8242 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
5fc13ad5
AS
8243 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8244 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8245 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8246 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8247 */
8248 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
f709160d
AN
8249 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8250 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
5bd022ec
MKL
8251
8252 return 0;
8253}
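/* Sketch: a kfunc extern whose local FUNC_PROTO must be compatible with
 * the kernel's, as checked above; __weak makes a missing or incompatible
 * kfunc non-fatal, matching the ext->is_weak handling.
 */
extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;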

static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
{
	const struct btf_type *t;
	struct extern_desc *ext;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
			continue;

		if (obj->gen_loader) {
			ext->is_set = true;
			ext->ksym.kernel_btf_obj_fd = 0;
			ext->ksym.kernel_btf_id = 0;
			continue;
		}
		t = btf__type_by_id(obj->btf, ext->btf_id);
		if (btf_is_var(t))
			err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
		else
			err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
		if (err)
			return err;
	}
	return 0;
}

static int bpf_object__resolve_externs(struct bpf_object *obj,
				       const char *extra_kconfig)
{
	bool need_config = false, need_kallsyms = false;
	bool need_vmlinux_btf = false;
	struct extern_desc *ext;
	void *kcfg_data = NULL;
	int err, i;

	if (obj->nr_extern == 0)
		return 0;

	if (obj->kconfig_map_idx >= 0)
		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (ext->type == EXT_KSYM) {
			if (ext->ksym.type_id)
				need_vmlinux_btf = true;
			else
				need_kallsyms = true;
			continue;
		} else if (ext->type == EXT_KCFG) {
			void *ext_ptr = kcfg_data + ext->kcfg.data_off;
			__u64 value = 0;

			/* Kconfig externs need actual /proc/config.gz */
			if (str_has_pfx(ext->name, "CONFIG_")) {
				need_config = true;
				continue;
			}

			/* Virtual kcfg externs are handled specially by libbpf */
			if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
				value = get_kernel_version();
				if (!value) {
					pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
					return -EINVAL;
				}
			} else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
				value = kernel_supports(obj, FEAT_BPF_COOKIE);
			} else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
				value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
			} else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
				/* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
				 * __kconfig externs, where LINUX_ ones are virtual and filled
				 * out by libbpf itself (their values don't come from Kconfig).
				 * If a LINUX_xxx variable is not recognized by libbpf, but is
				 * marked __weak, it defaults to zero value, just like for
				 * CONFIG_xxx externs.
				 */
				pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
				return -EINVAL;
			}

			err = set_kcfg_value_num(ext, ext_ptr, value);
			if (err)
				return err;
			pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
				 ext->name, (long long)value);
		} else {
			pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
			return -EINVAL;
		}
	}
	if (need_config && extra_kconfig) {
		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
		if (err)
			return -EINVAL;
		need_config = false;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type == EXT_KCFG && !ext->is_set) {
				need_config = true;
				break;
			}
		}
	}
	if (need_config) {
		err = bpf_object__read_kconfig_file(obj, kcfg_data);
		if (err)
			return -EINVAL;
	}
	if (need_kallsyms) {
		err = bpf_object__read_kallsyms_file(obj);
		if (err)
			return -EINVAL;
	}
	if (need_vmlinux_btf) {
		err = bpf_object__resolve_ksyms_btf_id(obj);
		if (err)
			return -EINVAL;
	}
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (!ext->is_set && !ext->is_weak) {
			pr_warn("extern '%s' (strong): not resolved\n", ext->name);
			return -ESRCH;
		} else if (!ext->is_set) {
			pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
				 ext->name);
		}
	}

	return 0;
}

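/* Illustrative sketch of the BPF-side declarations that the extern
 * resolution above services; the specific symbols below are examples
 * following bpf_helpers.h conventions, not part of this file:
 *
 *	extern int LINUX_KERNEL_VERSION __kconfig;	// virtual, filled by libbpf
 *	extern bool CONFIG_BPF_JIT __kconfig __weak;	// read from /proc/config.gz
 *	extern const struct rq runqueues __ksym;	// typed ksym, matched via (module) BTF
 *	extern void bpf_rcu_read_lock(void) __ksym __weak; // kfunc ksym, may be absent
 *
 * Strong externs must resolve or loading fails with -ESRCH; weak ones
 * default to zero/NULL, matching the logic in bpf_object__resolve_externs().
 */
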
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
	struct bpf_struct_ops *st_ops;
	__u32 i;

	st_ops = map->st_ops;
	for (i = 0; i < btf_vlen(st_ops->type); i++) {
		struct bpf_program *prog = st_ops->progs[i];
		void *kern_data;
		int prog_fd;

		if (!prog)
			continue;

		prog_fd = bpf_program__fd(prog);
		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
		*(unsigned long *)kern_data = prog_fd;
	}
}

static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
{
	struct bpf_map *map;
	int i;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		if (!map->autocreate)
			continue;

		bpf_map_prepare_vdata(map);
	}

	return 0;
}

static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
{
	int err, i;

	if (!obj)
		return libbpf_err(-EINVAL);

	if (obj->loaded) {
		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
		return libbpf_err(-EINVAL);
	}

	if (obj->gen_loader)
		bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);

	err = bpf_object_prepare_token(obj);
	err = err ? : bpf_object__probe_loading(obj);
	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
	err = err ? : bpf_object__sanitize_maps(obj);
	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
	err = err ? : bpf_object__sanitize_and_load_btf(obj);
	err = err ? : bpf_object__create_maps(obj);
	err = err ? : bpf_object__load_progs(obj, extra_log_level);
	err = err ? : bpf_object_init_prog_arrays(obj);
	err = err ? : bpf_object_prepare_struct_ops(obj);

	if (obj->gen_loader) {
		/* reset FDs */
		if (obj->btf)
			btf__set_fd(obj->btf, -1);
		if (!err)
			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
	}

	/* clean up fd_array */
	zfree(&obj->fd_array);

	/* clean up module BTFs */
	for (i = 0; i < obj->btf_module_cnt; i++) {
		close(obj->btf_modules[i].fd);
		btf__free(obj->btf_modules[i].btf);
		free(obj->btf_modules[i].name);
	}
	free(obj->btf_modules);

	/* clean up vmlinux BTF */
	btf__free(obj->btf_vmlinux);
	obj->btf_vmlinux = NULL;

	obj->loaded = true; /* set regardless of whether load succeeded or not */

	if (err)
		goto out;

	return 0;
out:
	/* unpin any maps that were auto-pinned during load */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object_unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return libbpf_err(err);
}

int bpf_object__load(struct bpf_object *obj)
{
	return bpf_object_load(obj, 0, NULL);
}
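
/* Minimal usage sketch for the load path above (error handling elided,
 * libbpf 1.0 error conventions assumed; "prog.bpf.o" is a placeholder):
 *
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	if (!obj)
 *		return -errno;
 *	if (bpf_object__load(obj)) {	// resolves externs, creates maps, loads progs
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 *	// ... attach programs, use maps ...
 *	bpf_object__close(obj);
 */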

static int make_parent_dir(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	char *dname, *dir;
	int err = 0;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (mkdir(dir, 0700) && errno != EEXIST)
		err = -errno;

	free(dname);
	if (err) {
		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
		pr_warn("failed to mkdir %s: %s\n", path, cp);
	}
	return err;
}

static int check_path(const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct statfs st_fs;
	char *dname, *dir;
	int err = 0;

	if (path == NULL)
		return -EINVAL;

	dname = strdup(path);
	if (dname == NULL)
		return -ENOMEM;

	dir = dirname(dname);
	if (statfs(dir, &st_fs)) {
		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
		pr_warn("failed to statfs %s: %s\n", dir, cp);
		err = -errno;
	}
	free(dname);

	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
		pr_warn("specified path %s is not on BPF FS\n", path);
		err = -EINVAL;
	}

	return err;
}

int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (prog->fd < 0) {
		pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
		return libbpf_err(-EINVAL);
	}

	err = make_parent_dir(path);
	if (err)
		return libbpf_err(err);

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	if (bpf_obj_pin(prog->fd, path)) {
		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
		return libbpf_err(err);
	}

	pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
	return 0;
}

int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int err;

	if (prog->fd < 0) {
		pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
		return libbpf_err(-EINVAL);
	}

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	err = unlink(path);
	if (err)
		return libbpf_err(-errno);

	pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
	return 0;
}
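
/* Usage sketch: pinning a loaded program into bpffs so it outlives the
 * process ("/sys/fs/bpf" is the conventional mount point; program name
 * "handle_tp" is a hypothetical example):
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_tp");
 *	if (prog && !bpf_program__pin(prog, "/sys/fs/bpf/handle_tp"))
 *		; // pinned; stays loaded until unpinned and all FDs closed
 *	bpf_program__unpin(prog, "/sys/fs/bpf/handle_tp");
 */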

int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (map->fd < 0) {
		pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
		return libbpf_err(-EINVAL);
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return libbpf_err(-EINVAL);
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return libbpf_err(-EINVAL);
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return libbpf_err(-EEXIST);
		}

		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return libbpf_err(err);

	err = check_path(map->pin_path);
	if (err)
		return libbpf_err(err);

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return libbpf_err(err);
}

int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return libbpf_err(-EINVAL);
		}
		path = map->pin_path;
	} else if (!path) {
		pr_warn("no path to unpin map '%s' from\n",
			bpf_map__name(map));
		return libbpf_err(-EINVAL);
	}

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	err = unlink(path);
	if (err != 0)
		return libbpf_err(-errno);

	map->pinned = false;
	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);

	return 0;
}

int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
{
	char *new = NULL;

	if (path) {
		new = strdup(path);
		if (!new)
			return libbpf_err(-errno);
	}

	free(map->pin_path);
	map->pin_path = new;
	return 0;
}

__alias(bpf_map__pin_path)
const char *bpf_map__get_pin_path(const struct bpf_map *map);

const char *bpf_map__pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}

bool bpf_map__is_pinned(const struct bpf_map *map)
{
	return map->pinned;
}

static void sanitize_pin_path(char *s)
{
	/* bpffs disallows periods in path names */
	while (*s) {
		if (*s == '.')
			*s = '_';
		s++;
	}
}

int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return libbpf_err(-ENOENT);
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (!map->autocreate)
			continue;

		if (path) {
			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
			if (err)
				goto err_unpin_maps;
			sanitize_pin_path(buf);
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	while ((map = bpf_object__prev_map(obj, map))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return libbpf_err(err);
}

int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
			if (err)
				return libbpf_err(err);
			sanitize_pin_path(buf);
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__unpin(map, pin_path);
		if (err)
			return libbpf_err(err);
	}

	return 0;
}

int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	char buf[PATH_MAX];
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return libbpf_err(-ENOENT);
	}

	bpf_object__for_each_program(prog, obj) {
		err = pathname_concat(buf, sizeof(buf), path, prog->name);
		if (err)
			goto err_unpin_programs;

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	while ((prog = bpf_object__prev_program(obj, prog))) {
		if (pathname_concat(buf, sizeof(buf), path, prog->name))
			continue;

		bpf_program__unpin(prog, buf);
	}

	return libbpf_err(err);
}

int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];

		err = pathname_concat(buf, sizeof(buf), path, prog->name);
		if (err)
			return libbpf_err(err);

		err = bpf_program__unpin(prog, buf);
		if (err)
			return libbpf_err(err);
	}

	return 0;
}

int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return libbpf_err(err);

	err = bpf_object__pin_programs(obj, path);
	if (err) {
		bpf_object__unpin_maps(obj, path);
		return libbpf_err(err);
	}

	return 0;
}

int bpf_object__unpin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__unpin_programs(obj, path);
	if (err)
		return libbpf_err(err);

	err = bpf_object__unpin_maps(obj, path);
	if (err)
		return libbpf_err(err);

	return 0;
}
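
/* Usage sketch: bpf_object__pin() pins all auto-created maps and all
 * programs of a loaded object under one bpffs directory (the path below
 * is illustrative); on partial failure already-pinned entries are rolled
 * back, as implemented above:
 *
 *	if (bpf_object__pin(obj, "/sys/fs/bpf/myobj"))
 *		; // handle error
 *	// ...
 *	bpf_object__unpin(obj, "/sys/fs/bpf/myobj");
 */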

static void bpf_map__destroy(struct bpf_map *map)
{
	if (map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	if (map->mmaped && map->mmaped != map->obj->arena_data)
		munmap(map->mmaped, bpf_map_mmap_sz(map));
	map->mmaped = NULL;

	if (map->st_ops) {
		zfree(&map->st_ops->data);
		zfree(&map->st_ops->progs);
		zfree(&map->st_ops->kern_func_off);
		zfree(&map->st_ops);
	}

	zfree(&map->name);
	zfree(&map->real_name);
	zfree(&map->pin_path);

	if (map->fd >= 0)
		zclose(map->fd);
}

void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (IS_ERR_OR_NULL(obj))
		return;

	usdt_manager_free(obj->usdt_man);
	obj->usdt_man = NULL;

	bpf_gen__free(obj->gen_loader);
	bpf_object__elf_finish(obj);
	bpf_object_unload(obj);
	btf__free(obj->btf);
	btf__free(obj->btf_vmlinux);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++)
		bpf_map__destroy(&obj->maps[i]);

	zfree(&obj->btf_custom_path);
	zfree(&obj->kconfig);

	for (i = 0; i < obj->nr_extern; i++)
		zfree(&obj->externs[i].essent_name);

	zfree(&obj->externs);
	obj->nr_extern = 0;

	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	zfree(&obj->feat_cache);
	zfree(&obj->token_path);
	if (obj->token_fd > 0)
		close(obj->token_fd);

	zfree(&obj->arena_data);

	free(obj);
}

const char *bpf_object__name(const struct bpf_object *obj)
{
	return obj ? obj->name : libbpf_err_ptr(-EINVAL);
}

unsigned int bpf_object__kversion(const struct bpf_object *obj)
{
	return obj ? obj->kern_version : 0;
}

struct btf *bpf_object__btf(const struct bpf_object *obj)
{
	return obj ? obj->btf : NULL;
}

int bpf_object__btf_fd(const struct bpf_object *obj)
{
	return obj->btf ? btf__fd(obj->btf) : -1;
}

int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
	if (obj->loaded)
		return libbpf_err(-EINVAL);

	obj->kern_version = kern_version;

	return 0;
}

int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
{
	struct bpf_gen *gen;

	if (!opts)
		return -EFAULT;
	if (!OPTS_VALID(opts, gen_loader_opts))
		return -EINVAL;
	gen = calloc(sizeof(*gen), 1);
	if (!gen)
		return -ENOMEM;
	gen->opts = opts;
	obj->gen_loader = gen;
	return 0;
}

static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		return errno = EINVAL, NULL;
	}

	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}

struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
	struct bpf_program *prog = prev;

	do {
		prog = __bpf_program__iter(prog, obj, true);
	} while (prog && prog_is_subprog(obj, prog));

	return prog;
}

struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
{
	struct bpf_program *prog = next;

	do {
		prog = __bpf_program__iter(prog, obj, false);
	} while (prog && prog_is_subprog(obj, prog));

	return prog;
}

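/* Usage sketch: the iterators above skip subprograms, so this visits only
 * entry-point programs of an object:
 *
 *	struct bpf_program *prog = NULL;
 *
 *	while ((prog = bpf_object__next_program(obj, prog)))
 *		printf("prog: %s (sec %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 */
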
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}

const char *bpf_program__name(const struct bpf_program *prog)
{
	return prog->name;
}

const char *bpf_program__section_name(const struct bpf_program *prog)
{
	return prog->sec_name;
}

bool bpf_program__autoload(const struct bpf_program *prog)
{
	return prog->autoload;
}

int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	prog->autoload = autoload;
	return 0;
}

bool bpf_program__autoattach(const struct bpf_program *prog)
{
	return prog->autoattach;
}

void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
{
	prog->autoattach = autoattach;
}

const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
{
	return prog->insns;
}

size_t bpf_program__insn_cnt(const struct bpf_program *prog)
{
	return prog->insns_cnt;
}

int bpf_program__set_insns(struct bpf_program *prog,
			   struct bpf_insn *new_insns, size_t new_insn_cnt)
{
	struct bpf_insn *insns;

	if (prog->obj->loaded)
		return -EBUSY;

	insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
	/* NULL is a valid return from reallocarray if the new count is zero */
	if (!insns && new_insn_cnt) {
		pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
		return -ENOMEM;
	}
	memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));

	prog->insns = insns;
	prog->insns_cnt = new_insn_cnt;
	return 0;
}
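
/* Usage sketch: bpf_program__set_insns() is a low-level escape hatch for
 * advanced users; a safer, more common pattern is read-only inspection
 * through the accessors above, e.g. scanning for call instructions:
 *
 *	const struct bpf_insn *insns = bpf_program__insns(prog);
 *	size_t i, cnt = bpf_program__insn_cnt(prog);
 *
 *	for (i = 0; i < cnt; i++)
 *		if (insns[i].code == (BPF_JMP | BPF_CALL))
 *			; // found a helper/kfunc call instruction
 */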

int bpf_program__fd(const struct bpf_program *prog)
{
	if (!prog)
		return libbpf_err(-EINVAL);

	if (prog->fd < 0)
		return libbpf_err(-ENOENT);

	return prog->fd;
}

__alias(bpf_program__type)
enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);

enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
{
	return prog->type;
}

static size_t custom_sec_def_cnt;
static struct bpf_sec_def *custom_sec_defs;
static struct bpf_sec_def custom_fallback_def;
static bool has_custom_fallback_def;
static int last_custom_sec_def_handler_id;

int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	if (prog->obj->loaded)
		return libbpf_err(-EBUSY);

	/* if type is not changed, do nothing */
	if (prog->type == type)
		return 0;

	prog->type = type;

	/* If a program type was changed, we need to reset associated SEC()
	 * handler, as it will be invalid now. The only exception is a generic
	 * fallback handler, which by definition is program type-agnostic and
	 * is a catch-all custom handler, optionally set by the application,
	 * so should be able to handle any type of BPF program.
	 */
	if (prog->sec_def != &custom_fallback_def)
		prog->sec_def = NULL;
	return 0;
}
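
/* Usage sketch: overriding the SEC()-derived program type before load.
 * Because the SEC() handler is reset above, the expected attach type
 * usually needs to be set explicitly as well:
 *
 *	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACING);
 *	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
 */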

__alias(bpf_program__expected_attach_type)
enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);

enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
{
	return prog->expected_attach_type;
}

int bpf_program__set_expected_attach_type(struct bpf_program *prog,
					  enum bpf_attach_type type)
{
	if (prog->obj->loaded)
		return libbpf_err(-EBUSY);

	prog->expected_attach_type = type;
	return 0;
}

__u32 bpf_program__flags(const struct bpf_program *prog)
{
	return prog->prog_flags;
}

int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
{
	if (prog->obj->loaded)
		return libbpf_err(-EBUSY);

	prog->prog_flags = flags;
	return 0;
}

__u32 bpf_program__log_level(const struct bpf_program *prog)
{
	return prog->log_level;
}

int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
{
	if (prog->obj->loaded)
		return libbpf_err(-EBUSY);

	prog->log_level = log_level;
	return 0;
}

const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
{
	*log_size = prog->log_size;
	return prog->log_buf;
}

int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
{
	if (log_size && !log_buf)
		return -EINVAL;
	if (prog->log_size > UINT_MAX)
		return -EINVAL;
	if (prog->obj->loaded)
		return -EBUSY;

	prog->log_buf = log_buf;
	prog->log_size = log_size;
	return 0;
}

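/* Usage sketch: capturing verifier output for one program before load
 * (buffer size is an arbitrary example; higher log levels are more
 * verbose):
 *
 *	static char vlog[64 * 1024];
 *
 *	bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *	bpf_program__set_log_level(prog, 1);
 *	if (bpf_object__load(obj))
 *		fprintf(stderr, "verifier log:\n%s\n", vlog);
 */
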
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
	.sec = (char *)sec_pfx,						    \
	.prog_type = BPF_PROG_TYPE_##ptype,				    \
	.expected_attach_type = atype,					    \
	.cookie = (long)(flags),					    \
	.prog_prepare_load_fn = libbpf_prepare_prog_load,		    \
	__VA_ARGS__							    \
}

static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);

static const struct bpf_sec_def section_defs[] = {
	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE),
	SEC_DEF("sk_reuseport/migrate",	SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
	SEC_DEF("sk_reuseport",		SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
	SEC_DEF("kprobe+",		KPROBE,	0, SEC_NONE, attach_kprobe),
	SEC_DEF("uprobe+",		KPROBE,	0, SEC_NONE, attach_uprobe),
	SEC_DEF("uprobe.s+",		KPROBE,	0, SEC_SLEEPABLE, attach_uprobe),
	SEC_DEF("kretprobe+",		KPROBE, 0, SEC_NONE, attach_kprobe),
	SEC_DEF("uretprobe+",		KPROBE, 0, SEC_NONE, attach_uprobe),
	SEC_DEF("uretprobe.s+",		KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
	SEC_DEF("kprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
	SEC_DEF("kretprobe.multi+",	KPROBE,	BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
	SEC_DEF("kprobe.session+",	KPROBE,	BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
	SEC_DEF("uprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
	SEC_DEF("uretprobe.multi+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
	SEC_DEF("uprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
	SEC_DEF("uretprobe.multi.s+",	KPROBE,	BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
	SEC_DEF("ksyscall+",		KPROBE,	0, SEC_NONE, attach_ksyscall),
	SEC_DEF("kretsyscall+",		KPROBE, 0, SEC_NONE, attach_ksyscall),
	SEC_DEF("usdt+",		KPROBE,	0, SEC_USDT, attach_usdt),
	SEC_DEF("usdt.s+",		KPROBE,	0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
	SEC_DEF("tc/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
	SEC_DEF("tc/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
	SEC_DEF("tcx/ingress",		SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
	SEC_DEF("tcx/egress",		SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
	SEC_DEF("tc",			SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
	SEC_DEF("classifier",		SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
	SEC_DEF("action",		SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
	SEC_DEF("netkit/primary",	SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
	SEC_DEF("netkit/peer",		SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
	SEC_DEF("tracepoint+",		TRACEPOINT, 0, SEC_NONE, attach_tp),
	SEC_DEF("tp+",			TRACEPOINT, 0, SEC_NONE, attach_tp),
	SEC_DEF("raw_tracepoint+",	RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tp+",		RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tracepoint.w+",	RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tp.w+",		RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("tp_btf+",		TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fmod_ret+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fexit+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry.s+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fmod_ret.s+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fexit.s+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("freplace+",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("lsm+",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
	SEC_DEF("lsm.s+",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
	SEC_DEF("lsm_cgroup+",		LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
	SEC_DEF("iter+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
	SEC_DEF("iter.s+",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
	SEC_DEF("syscall",		SYSCALL, 0, SEC_SLEEPABLE),
	SEC_DEF("xdp.frags/devmap",	XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
	SEC_DEF("xdp/devmap",		XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp.frags/cpumap",	XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
	SEC_DEF("xdp/cpumap",		XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp.frags",		XDP, BPF_XDP, SEC_XDP_FRAGS),
	SEC_DEF("xdp",			XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
	SEC_DEF("perf_event",		PERF_EVENT, 0, SEC_NONE),
	SEC_DEF("lwt_in",		LWT_IN, 0, SEC_NONE),
	SEC_DEF("lwt_out",		LWT_OUT, 0, SEC_NONE),
	SEC_DEF("lwt_xmit",		LWT_XMIT, 0, SEC_NONE),
	SEC_DEF("lwt_seg6local",	LWT_SEG6LOCAL, 0, SEC_NONE),
	SEC_DEF("sockops",		SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
	SEC_DEF("sk_skb/stream_parser",	SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
	SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
	SEC_DEF("sk_skb/verdict",	SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
	SEC_DEF("sk_skb",		SK_SKB, 0, SEC_NONE),
	SEC_DEF("sk_msg",		SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
	SEC_DEF("lirc_mode2",		LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
	SEC_DEF("flow_dissector",	FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
	SEC_DEF("cgroup_skb/ingress",	CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
	SEC_DEF("cgroup_skb/egress",	CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
	SEC_DEF("cgroup/skb",		CGROUP_SKB, 0, SEC_NONE),
	SEC_DEF("cgroup/sock_create",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sock_release",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sock",		CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
	SEC_DEF("cgroup/post_bind4",	CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
	SEC_DEF("cgroup/post_bind6",	CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
	SEC_DEF("cgroup/bind4",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
	SEC_DEF("cgroup/bind6",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
	SEC_DEF("cgroup/connect4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
	SEC_DEF("cgroup/connect6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
	SEC_DEF("cgroup/connect_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sendmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sendmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sendmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/recvmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/recvmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/recvmsg_unix",	CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getpeername4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getpeername6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getsockname4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getsockname6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
	SEC_DEF("cgroup/sysctl",	CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
	SEC_DEF("cgroup/getsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
	SEC_DEF("cgroup/setsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
	SEC_DEF("cgroup/dev",		CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
	SEC_DEF("struct_ops.s+",	STRUCT_OPS, 0, SEC_SLEEPABLE),
	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
	SEC_DEF("netfilter",		NETFILTER, BPF_NETFILTER, SEC_NONE),
};

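/* The table above drives SEC() parsing. A few illustrative BPF-side
 * section names and the definitions they match (function names are
 * hypothetical examples):
 *
 *	SEC("kprobe/do_unlinkat")	-> "kprobe+" (exact name or "type/extras")
 *	SEC("tcx/ingress")		-> "tcx/ingress" (exact match)
 *	SEC("xdp.frags")		-> "xdp.frags" (BPF_XDP with SEC_XDP_FRAGS)
 *	SEC("fentry.s/do_exit")		-> "fentry.s+" (sleepable, needs BTF target)
 */
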
int libbpf_register_prog_handler(const char *sec,
				 enum bpf_prog_type prog_type,
				 enum bpf_attach_type exp_attach_type,
				 const struct libbpf_prog_handler_opts *opts)
{
	struct bpf_sec_def *sec_def;

	if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
		return libbpf_err(-EINVAL);

	if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
		return libbpf_err(-E2BIG);

	if (sec) {
		sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
					      sizeof(*sec_def));
		if (!sec_def)
			return libbpf_err(-ENOMEM);

		custom_sec_defs = sec_def;
		sec_def = &custom_sec_defs[custom_sec_def_cnt];
	} else {
		if (has_custom_fallback_def)
			return libbpf_err(-EBUSY);

		sec_def = &custom_fallback_def;
	}

	sec_def->sec = sec ? strdup(sec) : NULL;
	if (sec && !sec_def->sec)
		return libbpf_err(-ENOMEM);

	sec_def->prog_type = prog_type;
	sec_def->expected_attach_type = exp_attach_type;
	sec_def->cookie = OPTS_GET(opts, cookie, 0);

	sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
	sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
	sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);

	sec_def->handler_id = ++last_custom_sec_def_handler_id;

	if (sec)
		custom_sec_def_cnt++;
	else
		has_custom_fallback_def = true;

	return sec_def->handler_id;
}
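
/* Usage sketch for applications extending SEC() handling; the "myapp/"
 * section prefix is hypothetical:
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, opts);
 *	int id;
 *
 *	id = libbpf_register_prog_handler("myapp/", BPF_PROG_TYPE_KPROBE,
 *					  0, &opts);
 *	if (id > 0)
 *		; // SEC("myapp/<extras>") now maps to BPF_PROG_TYPE_KPROBE
 *	libbpf_unregister_prog_handler(id);
 */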

int libbpf_unregister_prog_handler(int handler_id)
{
	struct bpf_sec_def *sec_defs;
	int i;

	if (handler_id <= 0)
		return libbpf_err(-EINVAL);

	if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
		memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
		has_custom_fallback_def = false;
		return 0;
	}

	for (i = 0; i < custom_sec_def_cnt; i++) {
		if (custom_sec_defs[i].handler_id == handler_id)
			break;
	}

	if (i == custom_sec_def_cnt)
		return libbpf_err(-ENOENT);

	free(custom_sec_defs[i].sec);
	for (i = i + 1; i < custom_sec_def_cnt; i++)
		custom_sec_defs[i - 1] = custom_sec_defs[i];
	custom_sec_def_cnt--;

	/* try to shrink the array, but it's ok if we couldn't */
	sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
	/* if new count is zero, reallocarray can return a valid NULL result;
	 * in this case the previous pointer will be freed, so we *have to*
	 * reassign old pointer to the new value (even if it's NULL)
	 */
	if (sec_defs || custom_sec_def_cnt == 0)
		custom_sec_defs = sec_defs;

	return 0;
}

static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
{
	size_t len = strlen(sec_def->sec);

	/* "type/" always has to have proper SEC("type/extras") form */
	if (sec_def->sec[len - 1] == '/') {
		if (str_has_pfx(sec_name, sec_def->sec))
			return true;
		return false;
	}

	/* "type+" means it can be either exact SEC("type") or
	 * well-formed SEC("type/extras") with proper '/' separator
	 */
	if (sec_def->sec[len - 1] == '+') {
		len--;
		/* not even a prefix */
		if (strncmp(sec_name, sec_def->sec, len) != 0)
			return false;
		/* exact match or has '/' separator */
		if (sec_name[len] == '\0' || sec_name[len] == '/')
			return true;
		return false;
	}

	return strcmp(sec_name, sec_def->sec) == 0;
}

static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
	const struct bpf_sec_def *sec_def;
	int i, n;

	n = custom_sec_def_cnt;
	for (i = 0; i < n; i++) {
		sec_def = &custom_sec_defs[i];
		if (sec_def_matches(sec_def, sec_name))
			return sec_def;
	}

	n = ARRAY_SIZE(section_defs);
	for (i = 0; i < n; i++) {
		sec_def = &section_defs[i];
		if (sec_def_matches(sec_def, sec_name))
			return sec_def;
	}

	if (has_custom_fallback_def)
		return &custom_fallback_def;

	return NULL;
}

#define MAX_TYPE_NAME_SIZE 32

static char *libbpf_get_type_names(bool attach_type)
{
	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
	char *buf;

	buf = malloc(len);
	if (!buf)
		return NULL;

	buf[0] = '\0';
	/* Build up a string buf with all available names */
	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
		const struct bpf_sec_def *sec_def = &section_defs[i];

		if (attach_type) {
			if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
				continue;

			if (!(sec_def->cookie & SEC_ATTACHABLE))
				continue;
		}

		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
			free(buf);
			return NULL;
		}
		strcat(buf, " ");
		strcat(buf, section_defs[i].sec);
	}

	return buf;
}

int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			     enum bpf_attach_type *expected_attach_type)
{
	const struct bpf_sec_def *sec_def;
	char *type_names;

	if (!name)
		return libbpf_err(-EINVAL);

	sec_def = find_sec_def(name);
	if (sec_def) {
		*prog_type = sec_def->prog_type;
		*expected_attach_type = sec_def->expected_attach_type;
		return 0;
	}

	pr_debug("failed to guess program type from ELF section '%s'\n", name);
	type_names = libbpf_get_type_names(false);
	if (type_names != NULL) {
		pr_debug("supported section(type) names are:%s\n", type_names);
		free(type_names);
	}

	return libbpf_err(-ESRCH);
}

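/* Usage sketch: mapping an ELF section name to program/attach types via
 * the section_defs table above:
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/connect4", &ptype, &atype))
 *		; // ptype == BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 *		  // atype == BPF_CGROUP_INET4_CONNECT
 */
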
const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
{
	if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
		return NULL;

	return attach_type_name[t];
}

const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
{
	if (t < 0 || t >= ARRAY_SIZE(link_type_name))
		return NULL;

	return link_type_name[t];
}

const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
{
	if (t < 0 || t >= ARRAY_SIZE(map_type_name))
		return NULL;

	return map_type_name[t];
}

const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
{
	if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
		return NULL;

	return prog_type_name[t];
}

static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
						     int sec_idx,
						     size_t offset)
{
	struct bpf_map *map;
	size_t i;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];
		if (!bpf_map__is_struct_ops(map))
			continue;
		if (map->sec_idx == sec_idx &&
		    map->sec_offset <= offset &&
		    offset - map->sec_offset < map->def.value_size)
			return map;
	}

	return NULL;
}

/* Collect the reloc from ELF, populate the st_ops->progs[], and update
 * st_ops->data for shadow type.
 */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
					    Elf64_Shdr *shdr, Elf_Data *data)
{
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	struct bpf_program *prog;
	unsigned int shdr_idx;
	const struct btf *btf;
	struct bpf_map *map;
	unsigned int moff, insn_idx;
	const char *name;
	__u32 member_idx;
	Elf64_Sym *sym;
	Elf64_Rel *rel;
	int i, nrels;

	btf = obj->btf;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		rel = elf_rel_by_idx(data, i);
		if (!rel) {
			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
		if (!sym) {
			pr_warn("struct_ops reloc: symbol %zx not found\n",
				(size_t)ELF64_R_SYM(rel->r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
		map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
		if (!map) {
			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
				(size_t)rel->r_offset);
			return -EINVAL;
		}

		moff = rel->r_offset - map->sec_offset;
		shdr_idx = sym->st_shndx;
		st_ops = map->st_ops;
		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
			 map->name,
			 (long long)(rel->r_info >> 32),
			 (long long)sym->st_value,
			 shdr_idx, (size_t)rel->r_offset,
			 map->sec_offset, sym->st_name, name);

		if (shdr_idx >= SHN_LORESERVE) {
			pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
				map->name, (size_t)rel->r_offset, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
				map->name, (unsigned long long)sym->st_value);
			return -LIBBPF_ERRNO__FORMAT;
		}
		insn_idx = sym->st_value / BPF_INSN_SZ;

		member = find_member_by_offset(st_ops->type, moff * 8);
		if (!member) {
			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
				map->name, moff);
			return -EINVAL;
		}
		member_idx = member - btf_members(st_ops->type);
		name = btf__name_by_offset(btf, member->name_off);

		if (!resolve_func_ptr(btf, member->type, NULL)) {
			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
				map->name, name);
			return -EINVAL;
		}

		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
		if (!prog) {
			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
				map->name, shdr_idx, name);
			return -EINVAL;
		}

		/* prevent the use of BPF prog with invalid type */
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
			pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
				map->name, prog->name);
			return -EINVAL;
		}

		st_ops->progs[member_idx] = prog;

		/* st_ops->data will be exposed to users, being returned by
		 * bpf_map__initial_value() as a pointer to the shadow
		 * type. All function pointers in the original struct type
		 * should be converted to a pointer to struct bpf_program
		 * in the shadow type.
		 */
		*((struct bpf_program **)(st_ops->data + moff)) = prog;
	}

	return 0;
}

#define BTF_TRACE_PREFIX "btf_trace_"
#define BTF_LSM_PREFIX "bpf_lsm_"
#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128

void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind)
{
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
		*prefix = BTF_TRACE_PREFIX;
		*kind = BTF_KIND_TYPEDEF;
		break;
	case BPF_LSM_MAC:
	case BPF_LSM_CGROUP:
		*prefix = BTF_LSM_PREFIX;
		*kind = BTF_KIND_FUNC;
		break;
	case BPF_TRACE_ITER:
		*prefix = BTF_ITER_PREFIX;
		*kind = BTF_KIND_FUNC;
		break;
	default:
		*prefix = "";
		*kind = BTF_KIND_FUNC;
	}
}

static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind)
{
	char btf_type_name[BTF_MAX_NAME_SIZE];
	int ret;

	ret = snprintf(btf_type_name, sizeof(btf_type_name),
		       "%s%s", prefix, name);
	/* snprintf returns the number of characters written excluding the
	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
	 * indicates truncation.
	 */
	if (ret < 0 || ret >= sizeof(btf_type_name))
		return -ENAMETOOLONG;
	return btf__find_by_name_kind(btf, btf_type_name, kind);
}

static inline int find_attach_btf_id(struct btf *btf, const char *name,
				     enum bpf_attach_type attach_type)
{
	const char *prefix;
	int kind;

	btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
	return find_btf_by_prefix_kind(btf, prefix, name, kind);
}

int libbpf_find_vmlinux_btf_id(const char *name,
			       enum bpf_attach_type attach_type)
{
	struct btf *btf;
	int err;

	btf = btf__load_vmlinux_btf();
	err = libbpf_get_error(btf);
	if (err) {
		pr_warn("vmlinux BTF is not found\n");
		return libbpf_err(err);
	}

	err = find_attach_btf_id(btf, name, attach_type);
	if (err <= 0)
		pr_warn("%s is not found in vmlinux BTF\n", name);

	btf__free(btf);
	return libbpf_err(err);
}

static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	struct btf *btf;
	int err;

	memset(&info, 0, info_len);
	err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
	if (err) {
		pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
			attach_prog_fd, err);
		return err;
	}

	err = -EINVAL;
	if (!info.btf_id) {
		pr_warn("The target program doesn't have BTF\n");
		goto out;
	}
	btf = btf__load_from_kernel_by_id(info.btf_id);
	err = libbpf_get_error(btf);
	if (err) {
		pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
		goto out;
	}
	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
	btf__free(btf);
	if (err <= 0) {
		pr_warn("%s is not found in prog's BTF\n", name);
		goto out;
	}
out:
	return err;
}

static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
			      enum bpf_attach_type attach_type,
			      int *btf_obj_fd, int *btf_type_id)
{
	int ret, i, mod_len;
	const char *fn_name, *mod_name = NULL;

	fn_name = strchr(attach_name, ':');
	if (fn_name) {
		mod_name = attach_name;
		mod_len = fn_name - mod_name;
		fn_name++;
	}

	if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) {
		ret = find_attach_btf_id(obj->btf_vmlinux,
					 mod_name ? fn_name : attach_name,
					 attach_type);
		if (ret > 0) {
			*btf_obj_fd = 0; /* vmlinux BTF */
			*btf_type_id = ret;
			return 0;
		}
		if (ret != -ENOENT)
			return ret;
	}

	ret = load_module_btfs(obj);
	if (ret)
		return ret;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		const struct module_btf *mod = &obj->btf_modules[i];

		if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
			continue;

		ret = find_attach_btf_id(mod->btf,
					 mod_name ? fn_name : attach_name,
					 attach_type);
		if (ret > 0) {
			*btf_obj_fd = mod->fd;
			*btf_type_id = ret;
			return 0;
		}
		if (ret == -ENOENT)
			continue;

		return ret;
	}

	return -ESRCH;
}

static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
				     int *btf_obj_fd, int *btf_type_id)
{
	enum bpf_attach_type attach_type = prog->expected_attach_type;
	__u32 attach_prog_fd = prog->attach_prog_fd;
	int err = 0;

	/* BPF program's BTF ID */
	if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
		if (!attach_prog_fd) {
			pr_warn("prog '%s': attach program FD is not set\n", prog->name);
			return -EINVAL;
		}
		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
		if (err < 0) {
			pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
				prog->name, attach_prog_fd, attach_name, err);
			return err;
		}
		*btf_obj_fd = 0;
		*btf_type_id = err;
		return 0;
	}

	/* kernel/module BTF ID */
	if (prog->obj->gen_loader) {
		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
		*btf_obj_fd = 0;
		*btf_type_id = 1;
	} else {
		err = find_kernel_btf_id(prog->obj, attach_name,
					 attach_type, btf_obj_fd,
					 btf_type_id);
	}
	if (err) {
		pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
			prog->name, attach_name, err);
		return err;
	}
	return 0;
}

956b620f
AI
9972int libbpf_attach_type_by_name(const char *name,
9973 enum bpf_attach_type *attach_type)
9974{
c76e4c22 9975 char *type_names;
b6291a6f 9976 const struct bpf_sec_def *sec_def;
956b620f
AI
9977
9978 if (!name)
e9fc3ce9 9979 return libbpf_err(-EINVAL);
956b620f 9980
b6291a6f
AN
9981 sec_def = find_sec_def(name);
9982 if (!sec_def) {
9983 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9984 type_names = libbpf_get_type_names(true);
9985 if (type_names != NULL) {
9986 pr_debug("attachable section(type) names are:%s\n", type_names);
9987 free(type_names);
9988 }
9989
9990 return libbpf_err(-EINVAL);
c76e4c22
TS
9991 }
9992
4fa5bcfe 9993 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
15ea31fa
AN
9994 return libbpf_err(-EINVAL);
9995 if (!(sec_def->cookie & SEC_ATTACHABLE))
b6291a6f
AN
9996 return libbpf_err(-EINVAL);
9997
9998 *attach_type = sec_def->expected_attach_type;
9999 return 0;
956b620f
AI
10000}
10001
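/* Usage sketch (illustrative, not part of the library source): mapping an
 * ELF section name to its expected attach type; "cgroup_skb/ingress" is one
 * of the attachable section names that find_sec_def() recognizes.
 */
enum bpf_attach_type type;

if (libbpf_attach_type_by_name("cgroup_skb/ingress", &type) == 0)
	printf("attach type: %d\n", type); /* BPF_CGROUP_INET_INGRESS */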
a324aae3 10002int bpf_map__fd(const struct bpf_map *map)
9d759a9b 10003{
f08c18e0
AN
10004 if (!map)
10005 return libbpf_err(-EINVAL);
10006 if (!map_is_created(map))
10007 return -1;
10008 return map->fd;
9d759a9b
WN
10009}
10010
aed65917
AN
10011static bool map_uses_real_name(const struct bpf_map *map)
10012{
10013 /* Since libbpf started to support custom .data.* and .rodata.* maps,
10014 * their user-visible name differs from the kernel-visible name. Users see
10015 * such a map's corresponding ELF section name as the map name.
10016 * This check distinguishes .data/.rodata from .data.* and .rodata.*
10017 * maps to know which name has to be returned to the user.
10018 */
10019 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
10020 return true;
10021 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
10022 return true;
10023 return false;
10024}
10025
a324aae3 10026const char *bpf_map__name(const struct bpf_map *map)
561bbcca 10027{
aed65917
AN
10028 if (!map)
10029 return NULL;
10030
10031 if (map_uses_real_name(map))
10032 return map->real_name;
10033
10034 return map->name;
561bbcca
WN
10035}
10036
1bdb6c9a
AN
10037enum bpf_map_type bpf_map__type(const struct bpf_map *map)
10038{
10039 return map->def.type;
10040}
10041
10042int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
10043{
f08c18e0 10044 if (map_is_created(map))
e9fc3ce9 10045 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10046 map->def.type = type;
10047 return 0;
10048}
10049
10050__u32 bpf_map__map_flags(const struct bpf_map *map)
10051{
10052 return map->def.map_flags;
10053}
10054
10055int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
10056{
f08c18e0 10057 if (map_is_created(map))
e9fc3ce9 10058 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10059 map->def.map_flags = flags;
10060 return 0;
10061}
10062
47512102
JK
10063__u64 bpf_map__map_extra(const struct bpf_map *map)
10064{
10065 return map->map_extra;
10066}
10067
10068int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
10069{
f08c18e0 10070 if (map_is_created(map))
47512102
JK
10071 return libbpf_err(-EBUSY);
10072 map->map_extra = map_extra;
10073 return 0;
10074}
10075
1bdb6c9a
AN
10076__u32 bpf_map__numa_node(const struct bpf_map *map)
10077{
10078 return map->numa_node;
10079}
10080
10081int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10082{
f08c18e0 10083 if (map_is_created(map))
e9fc3ce9 10084 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10085 map->numa_node = numa_node;
10086 return 0;
10087}
10088
10089__u32 bpf_map__key_size(const struct bpf_map *map)
10090{
10091 return map->def.key_size;
10092}
10093
10094int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10095{
f08c18e0 10096 if (map_is_created(map))
e9fc3ce9 10097 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10098 map->def.key_size = size;
10099 return 0;
10100}
10101
10102__u32 bpf_map__value_size(const struct bpf_map *map)
10103{
10104 return map->def.value_size;
10105}
10106
9d0a2331
JK
10107static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10108{
10109 struct btf *btf;
10110 struct btf_type *datasec_type, *var_type;
10111 struct btf_var_secinfo *var;
10112 const struct btf_type *array_type;
10113 const struct btf_array *array;
4c857a71
JK
10114 int vlen, element_sz, new_array_id;
10115 __u32 nr_elements;
9d0a2331
JK
10116
10117 /* check btf existence */
10118 btf = bpf_object__btf(map->obj);
10119 if (!btf)
10120 return -ENOENT;
10121
10122 /* verify map is datasec */
10123 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10124 if (!btf_is_datasec(datasec_type)) {
10125 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10126 bpf_map__name(map));
10127 return -EINVAL;
10128 }
10129
10130 /* verify datasec has at least one var */
10131 vlen = btf_vlen(datasec_type);
10132 if (vlen == 0) {
10133 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10134 bpf_map__name(map));
10135 return -EINVAL;
10136 }
10137
10138 /* verify last var in the datasec is an array */
10139 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10140 var_type = btf_type_by_id(btf, var->type);
10141 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10142 if (!btf_is_array(array_type)) {
10143 pr_warn("map '%s': cannot be resized, last var must be an array\n",
10144 bpf_map__name(map));
10145 return -EINVAL;
10146 }
10147
10148 /* verify request size aligns with array */
10149 array = btf_array(array_type);
10150 element_sz = btf__resolve_size(btf, array->type);
10151 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10152 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10153 bpf_map__name(map), element_sz, size);
10154 return -EINVAL;
10155 }
10156
10157 /* create a new array based on the existing array, but with new length */
10158 nr_elements = (size - var->offset) / element_sz;
10159 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10160 if (new_array_id < 0)
10161 return new_array_id;
10162
10163 /* adding a new btf type invalidates existing pointers to btf objects,
10164 * so refresh pointers before proceeding
10165 */
10166 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10167 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10168 var_type = btf_type_by_id(btf, var->type);
10169
10170 /* finally update btf info */
10171 datasec_type->size = size;
10172 var->size = size - var->offset;
10173 var_type->type = new_array_id;
10174
10175 return 0;
10176}
10177
1bdb6c9a
AN
10178int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10179{
f08c18e0 10180 if (map->obj->loaded || map->reused)
e9fc3ce9 10181 return libbpf_err(-EBUSY);
9d0a2331
JK
10182
10183 if (map->mmaped) {
9d0a2331 10184 size_t mmap_old_sz, mmap_new_sz;
79ff13e9
AS
10185 int err;
10186
10187 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10188 return -EOPNOTSUPP;
9d0a2331 10189
79ff13e9
AS
10190 mmap_old_sz = bpf_map_mmap_sz(map);
10191 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
9d0a2331
JK
10192 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10193 if (err) {
10194 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10195 bpf_map__name(map), err);
10196 return err;
10197 }
10198 err = map_btf_datasec_resize(map, size);
10199 if (err && err != -ENOENT) {
10200 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10201 bpf_map__name(map), err);
10202 map->btf_value_type_id = 0;
10203 map->btf_key_type_id = 0;
10204 }
10205 }
10206
1bdb6c9a
AN
10207 map->def.value_size = size;
10208 return 0;
10209}
10210
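/* Usage sketch (illustrative; file and map names are hypothetical): growing
 * a global-data array before load. This only works pre-load on mmap-able
 * ARRAY maps, and map_btf_datasec_resize() above keeps the BTF description
 * in sync when the resizable array is the last variable in the datasec.
 */
struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
struct bpf_map *data = bpf_object__find_map_by_name(obj, ".data");

if (data && bpf_map__set_value_size(data, 2 * bpf_map__value_size(data)))
	fprintf(stderr, "resize failed\n");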
5b891af7 10211__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
8a138aed 10212{
61746dbe 10213 return map ? map->btf_key_type_id : 0;
8a138aed
MKL
10214}
10215
5b891af7 10216__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
8a138aed 10217{
61746dbe 10218 return map ? map->btf_value_type_id : 0;
8a138aed
MKL
10219}
10220
e2842be5
THJ
10221int bpf_map__set_initial_value(struct bpf_map *map,
10222 const void *data, size_t size)
10223{
2e7ba4f8
AN
10224 size_t actual_sz;
10225
f08c18e0
AN
10226 if (map->obj->loaded || map->reused)
10227 return libbpf_err(-EBUSY);
10228
2e7ba4f8
AN
10229 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10230 return libbpf_err(-EINVAL);
10231
10232 if (map->def.type == BPF_MAP_TYPE_ARENA)
10233 actual_sz = map->obj->arena_data_sz;
10234 else
10235 actual_sz = map->def.value_size;
10236 if (size != actual_sz)
e9fc3ce9 10237 return libbpf_err(-EINVAL);
e2842be5
THJ
10238
10239 memcpy(map->mmaped, data, size);
10240 return 0;
10241}
10242
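/* Usage sketch (illustrative; the struct layout is hypothetical): seeding
 * .rodata before bpf_object__load(). The buffer must match the BPF side's
 * global variables exactly, since size is checked against the map's value
 * size above.
 */
struct { __u32 debug; __u64 rate; } ro_vals = { .debug = 1, .rate = 1000 };
struct bpf_map *ro = bpf_object__find_map_by_name(obj, ".rodata");

if (!ro || bpf_map__set_initial_value(ro, &ro_vals, sizeof(ro_vals)))
	fprintf(stderr, "failed to set initial .rodata\n");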
2e7ba4f8 10243void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
7723256b 10244{
69e4a9d2
KFL
10245 if (bpf_map__is_struct_ops(map)) {
10246 if (psize)
10247 *psize = map->def.value_size;
10248 return map->st_ops->data;
10249 }
10250
7723256b
AS
10251 if (!map->mmaped)
10252 return NULL;
2e7ba4f8
AN
10253
10254 if (map->def.type == BPF_MAP_TYPE_ARENA)
10255 *psize = map->obj->arena_data_sz;
10256 else
10257 *psize = map->def.value_size;
10258
7723256b
AS
10259 return map->mmaped;
10260}
10261
a324aae3 10262bool bpf_map__is_internal(const struct bpf_map *map)
d859900c
DB
10263{
10264 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10265}
10266
1bdb6c9a
AN
10267__u32 bpf_map__ifindex(const struct bpf_map *map)
10268{
10269 return map->map_ifindex;
10270}
10271
10272int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9aba3613 10273{
f08c18e0 10274 if (map_is_created(map))
e9fc3ce9 10275 return libbpf_err(-EBUSY);
9aba3613 10276 map->map_ifindex = ifindex;
1bdb6c9a 10277 return 0;
9aba3613
JK
10278}
10279
addb9fc9
NS
10280int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10281{
10282 if (!bpf_map_type__is_map_in_map(map->def.type)) {
be18010e 10283 pr_warn("error: unsupported map type\n");
e9fc3ce9 10284 return libbpf_err(-EINVAL);
addb9fc9
NS
10285 }
10286 if (map->inner_map_fd != -1) {
be18010e 10287 pr_warn("error: inner_map_fd already specified\n");
e9fc3ce9 10288 return libbpf_err(-EINVAL);
addb9fc9 10289 }
8f7b239e
AN
10290 if (map->inner_map) {
10291 bpf_map__destroy(map->inner_map);
10292 zfree(&map->inner_map);
10293 }
addb9fc9
NS
10294 map->inner_map_fd = fd;
10295 return 0;
10296}
10297
0c19a9fb 10298static struct bpf_map *
a324aae3 10299__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9d759a9b 10300{
0c19a9fb 10301 ssize_t idx;
9d759a9b
WN
10302 struct bpf_map *s, *e;
10303
10304 if (!obj || !obj->maps)
e9fc3ce9 10305 return errno = EINVAL, NULL;
9d759a9b
WN
10306
10307 s = obj->maps;
10308 e = obj->maps + obj->nr_maps;
10309
0c19a9fb 10310 if ((m < s) || (m >= e)) {
be18010e
KW
10311 pr_warn("error in %s: map handler doesn't belong to object\n",
10312 __func__);
e9fc3ce9 10313 return errno = EINVAL, NULL;
9d759a9b
WN
10314 }
10315
0c19a9fb
SF
10316 idx = (m - obj->maps) + i;
10317 if (idx >= obj->nr_maps || idx < 0)
9d759a9b
WN
10318 return NULL;
10319 return &obj->maps[idx];
10320}
561bbcca 10321
2088a3a7
HC
10322struct bpf_map *
10323bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
0c19a9fb
SF
10324{
10325 if (prev == NULL)
10326 return obj->maps;
10327
10328 return __bpf_map__iter(prev, obj, 1);
10329}
10330
2088a3a7
HC
10331struct bpf_map *
10332bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
0c19a9fb
SF
10333{
10334 if (next == NULL) {
10335 if (!obj->nr_maps)
10336 return NULL;
10337 return obj->maps + obj->nr_maps - 1;
10338 }
10339
10340 return __bpf_map__iter(next, obj, -1);
10341}
10342
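/* Usage sketch (illustrative, not part of the library source): walking an
 * object's maps with the accessor pair above; the bpf_object__for_each_map()
 * macro expands to a loop like this one.
 */
struct bpf_map *m;

for (m = bpf_object__next_map(obj, NULL); m; m = bpf_object__next_map(obj, m))
	printf("map: %s\n", bpf_map__name(m));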
561bbcca 10343struct bpf_map *
a324aae3 10344bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
561bbcca
WN
10345{
10346 struct bpf_map *pos;
10347
f74a53d9 10348 bpf_object__for_each_map(pos, obj) {
26071635
AN
10349 /* if it's a special internal map name (which always starts
10350 * with a dot) then check if that special name matches the
10351 * real map name (ELF section name)
10352 */
10353 if (name[0] == '.') {
10354 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10355 return pos;
10356 continue;
10357 }
10358 /* otherwise map name has to be an exact match */
aed65917
AN
10359 if (map_uses_real_name(pos)) {
10360 if (strcmp(pos->real_name, name) == 0)
10361 return pos;
10362 continue;
10363 }
10364 if (strcmp(pos->name, name) == 0)
561bbcca
WN
10365 return pos;
10366 }
e9fc3ce9 10367 return errno = ENOENT, NULL;
561bbcca 10368}
5a6acad1 10369
f3cea32d 10370int
a324aae3 10371bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
f3cea32d
MF
10372{
10373 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10374}
10375
737d0646
AN
10376static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10377 size_t value_sz, bool check_value_sz)
10378{
f08c18e0 10379 if (!map_is_created(map)) /* map is not yet created */
737d0646
AN
10380 return -ENOENT;
10381
10382 if (map->def.key_size != key_sz) {
10383 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10384 map->name, key_sz, map->def.key_size);
10385 return -EINVAL;
10386 }
10387
7b30c296
MY
10388 if (map->fd < 0) {
10389 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
10390 return -EINVAL;
10391 }
10392
737d0646
AN
10393 if (!check_value_sz)
10394 return 0;
10395
10396 switch (map->def.type) {
10397 case BPF_MAP_TYPE_PERCPU_ARRAY:
10398 case BPF_MAP_TYPE_PERCPU_HASH:
10399 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10400 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10401 int num_cpu = libbpf_num_possible_cpus();
10402 size_t elem_sz = roundup(map->def.value_size, 8);
10403
10404 if (value_sz != num_cpu * elem_sz) {
10405 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10406 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10407 return -EINVAL;
10408 }
10409 break;
10410 }
10411 default:
10412 if (map->def.value_size != value_sz) {
10413 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10414 map->name, value_sz, map->def.value_size);
10415 return -EINVAL;
10416 }
10417 break;
10418 }
10419 return 0;
10420}
10421
10422int bpf_map__lookup_elem(const struct bpf_map *map,
10423 const void *key, size_t key_sz,
10424 void *value, size_t value_sz, __u64 flags)
10425{
10426 int err;
10427
10428 err = validate_map_op(map, key_sz, value_sz, true);
10429 if (err)
10430 return libbpf_err(err);
10431
10432 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10433}
10434
10435int bpf_map__update_elem(const struct bpf_map *map,
10436 const void *key, size_t key_sz,
10437 const void *value, size_t value_sz, __u64 flags)
10438{
10439 int err;
10440
10441 err = validate_map_op(map, key_sz, value_sz, true);
10442 if (err)
10443 return libbpf_err(err);
10444
10445 return bpf_map_update_elem(map->fd, key, value, flags);
10446}
10447
10448int bpf_map__delete_elem(const struct bpf_map *map,
10449 const void *key, size_t key_sz, __u64 flags)
10450{
10451 int err;
10452
10453 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10454 if (err)
10455 return libbpf_err(err);
10456
10457 return bpf_map_delete_elem_flags(map->fd, key, flags);
10458}
10459
10460int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10461 const void *key, size_t key_sz,
10462 void *value, size_t value_sz, __u64 flags)
10463{
10464 int err;
10465
10466 err = validate_map_op(map, key_sz, value_sz, true);
10467 if (err)
10468 return libbpf_err(err);
10469
10470 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10471}
10472
10473int bpf_map__get_next_key(const struct bpf_map *map,
10474 const void *cur_key, void *next_key, size_t key_sz)
10475{
10476 int err;
10477
10478 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10479 if (err)
10480 return libbpf_err(err);
10481
10482 return bpf_map_get_next_key(map->fd, cur_key, next_key);
10483}
10484
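/* Usage sketch (illustrative): the size-checked element API against a
 * hypothetical HASH map with __u32 keys and __u64 values. The explicit
 * sizes let validate_map_op() reject mismatched callers with -EINVAL.
 */
__u32 key = 42;
__u64 val = 1, out = 0;

if (!bpf_map__update_elem(map, &key, sizeof(key), &val, sizeof(val), BPF_ANY) &&
    !bpf_map__lookup_elem(map, &key, sizeof(key), &out, sizeof(out), 0))
	printf("value for key %u: %llu\n", key, (unsigned long long)out);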
e28ff1a8
JS
10485long libbpf_get_error(const void *ptr)
10486{
e9fc3ce9
AN
10487 if (!IS_ERR_OR_NULL(ptr))
10488 return 0;
10489
10490 if (IS_ERR(ptr))
10491 errno = -PTR_ERR(ptr);
10492
10493 /* If ptr == NULL, then errno should be already set by the failing
10494 * API, because libbpf never returns NULL on success and it now always
10495 * sets errno on error. So no extra errno handling for ptr == NULL
10496 * case.
10497 */
10498 return -errno;
e28ff1a8 10499}
6f6d33f3 10500
cc4f864b
AN
10501/* Replace link's underlying BPF program with the new one */
10502int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10503{
e9fc3ce9 10504 int ret;
7b30c296
MY
10505 int prog_fd = bpf_program__fd(prog);
10506
10507 if (prog_fd < 0) {
10508 pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
10509 prog->name);
10510 return libbpf_err(-EINVAL);
10511 }
c139e40a 10512
7b30c296 10513 ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
e9fc3ce9 10514 return libbpf_err_errno(ret);
cc4f864b
AN
10515}
10516
d6958706
AN
10517/* Release "ownership" of the underlying BPF resource (typically, a BPF
10518 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
10519 * disconnected link, when destroyed through a bpf_link__destroy() call,
10520 * won't attempt to detach/unregister that BPF resource. This is useful in
10521 * situations where, say, the attached BPF program has to outlive the
10522 * userspace program that attached it in the system. Depending on the type
10523 * of BPF program, though, additional steps (like pinning the BPF program
10524 * in BPF FS) might be necessary to ensure that exit of the userspace
10525 * program doesn't trigger automatic detachment and cleanup inside the kernel.
10526 */
10527void bpf_link__disconnect(struct bpf_link *link)
10528{
10529 link->disconnected = true;
10530}
10531
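/* Usage sketch (illustrative; the pin path is hypothetical): making an
 * attachment outlive the process by pinning the link, then disconnecting so
 * that bpf_link__destroy() only frees memory without detaching.
 */
struct bpf_link *link = bpf_program__attach(prog);

if (link && bpf_link__pin(link, "/sys/fs/bpf/my_link") == 0) {
	bpf_link__disconnect(link);
	bpf_link__destroy(link); /* attachment stays, kept alive by the pin */
}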
1c2e9efc
AN
10532int bpf_link__destroy(struct bpf_link *link)
10533{
d6958706 10534 int err = 0;
1c2e9efc 10535
50450fc7 10536 if (IS_ERR_OR_NULL(link))
1c2e9efc
AN
10537 return 0;
10538
d6958706
AN
10539 if (!link->disconnected && link->detach)
10540 err = link->detach(link);
c016b68e
AN
10541 if (link->pin_path)
10542 free(link->pin_path);
d88b71d4
AN
10543 if (link->dealloc)
10544 link->dealloc(link);
10545 else
10546 free(link);
1c2e9efc 10547
e9fc3ce9 10548 return libbpf_err(err);
1c2e9efc
AN
10549}
10550
c016b68e
AN
10551int bpf_link__fd(const struct bpf_link *link)
10552{
10553 return link->fd;
10554}
10555
10556const char *bpf_link__pin_path(const struct bpf_link *link)
10557{
10558 return link->pin_path;
10559}
10560
10561static int bpf_link__detach_fd(struct bpf_link *link)
10562{
e9fc3ce9 10563 return libbpf_err_errno(close(link->fd));
c016b68e
AN
10564}
10565
10566struct bpf_link *bpf_link__open(const char *path)
10567{
10568 struct bpf_link *link;
10569 int fd;
10570
10571 fd = bpf_obj_get(path);
10572 if (fd < 0) {
10573 fd = -errno;
10574 pr_warn("failed to open link at %s: %d\n", path, fd);
e9fc3ce9 10575 return libbpf_err_ptr(fd);
c016b68e
AN
10576 }
10577
10578 link = calloc(1, sizeof(*link));
10579 if (!link) {
10580 close(fd);
e9fc3ce9 10581 return libbpf_err_ptr(-ENOMEM);
c016b68e
AN
10582 }
10583 link->detach = &bpf_link__detach_fd;
10584 link->fd = fd;
10585
10586 link->pin_path = strdup(path);
10587 if (!link->pin_path) {
10588 bpf_link__destroy(link);
e9fc3ce9 10589 return libbpf_err_ptr(-ENOMEM);
c016b68e
AN
10590 }
10591
10592 return link;
10593}
10594
2e49527e
AN
10595int bpf_link__detach(struct bpf_link *link)
10596{
10597 return bpf_link_detach(link->fd) ? -errno : 0;
10598}
10599
c016b68e
AN
10600int bpf_link__pin(struct bpf_link *link, const char *path)
10601{
10602 int err;
10603
10604 if (link->pin_path)
e9fc3ce9 10605 return libbpf_err(-EBUSY);
c016b68e
AN
10606 err = make_parent_dir(path);
10607 if (err)
e9fc3ce9 10608 return libbpf_err(err);
c016b68e
AN
10609 err = check_path(path);
10610 if (err)
e9fc3ce9 10611 return libbpf_err(err);
c016b68e
AN
10612
10613 link->pin_path = strdup(path);
10614 if (!link->pin_path)
e9fc3ce9 10615 return libbpf_err(-ENOMEM);
c016b68e
AN
10616
10617 if (bpf_obj_pin(link->fd, link->pin_path)) {
10618 err = -errno;
10619 zfree(&link->pin_path);
e9fc3ce9 10620 return libbpf_err(err);
c016b68e
AN
10621 }
10622
10623 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10624 return 0;
10625}
10626
10627int bpf_link__unpin(struct bpf_link *link)
10628{
10629 int err;
10630
10631 if (!link->pin_path)
e9fc3ce9 10632 return libbpf_err(-EINVAL);
c016b68e
AN
10633
10634 err = unlink(link->pin_path);
10635 if (err != 0)
af0efa05 10636 return -errno;
c016b68e
AN
10637
10638 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10639 zfree(&link->pin_path);
10640 return 0;
10641}
63f2f5ee 10642
668ace0e
AN
10643struct bpf_link_perf {
10644 struct bpf_link link;
10645 int perf_event_fd;
ca304b40
RDT
10646 /* legacy kprobe support: keep track of probe identifier and type */
10647 char *legacy_probe_name;
46ed5fc3 10648 bool legacy_is_kprobe;
ca304b40 10649 bool legacy_is_retprobe;
668ace0e
AN
10650};
10651
46ed5fc3 10652static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
cc10623c 10653static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
46ed5fc3 10654
668ace0e 10655static int bpf_link_perf_detach(struct bpf_link *link)
63f2f5ee 10656{
668ace0e
AN
10657 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10658 int err = 0;
63f2f5ee 10659
668ace0e 10660 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
63f2f5ee
AN
10661 err = -errno;
10662
668ace0e
AN
10663 if (perf_link->perf_event_fd != link->fd)
10664 close(perf_link->perf_event_fd);
c016b68e 10665 close(link->fd);
668ace0e 10666
cc10623c 10667 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
46ed5fc3
AN
10668 if (perf_link->legacy_probe_name) {
10669 if (perf_link->legacy_is_kprobe) {
10670 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10671 perf_link->legacy_is_retprobe);
cc10623c
AN
10672 } else {
10673 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10674 perf_link->legacy_is_retprobe);
46ed5fc3
AN
10675 }
10676 }
ca304b40
RDT
10677
10678 return err;
63f2f5ee
AN
10679}
10680
668ace0e
AN
10681static void bpf_link_perf_dealloc(struct bpf_link *link)
10682{
10683 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10684
ca304b40 10685 free(perf_link->legacy_probe_name);
668ace0e
AN
10686 free(perf_link);
10687}
10688
942025c9 10689struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
47faff37 10690 const struct bpf_perf_event_opts *opts)
63f2f5ee
AN
10691{
10692 char errmsg[STRERR_BUFSIZE];
668ace0e
AN
10693 struct bpf_link_perf *link;
10694 int prog_fd, link_fd = -1, err;
f8b299bc 10695 bool force_ioctl_attach;
63f2f5ee 10696
47faff37
AN
10697 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10698 return libbpf_err_ptr(-EINVAL);
10699
63f2f5ee 10700 if (pfd < 0) {
52109584
AN
10701 pr_warn("prog '%s': invalid perf event FD %d\n",
10702 prog->name, pfd);
e9fc3ce9 10703 return libbpf_err_ptr(-EINVAL);
63f2f5ee
AN
10704 }
10705 prog_fd = bpf_program__fd(prog);
10706 if (prog_fd < 0) {
7b30c296 10707 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
52109584 10708 prog->name);
e9fc3ce9 10709 return libbpf_err_ptr(-EINVAL);
63f2f5ee
AN
10710 }
10711
d6958706 10712 link = calloc(1, sizeof(*link));
63f2f5ee 10713 if (!link)
e9fc3ce9 10714 return libbpf_err_ptr(-ENOMEM);
668ace0e
AN
10715 link->link.detach = &bpf_link_perf_detach;
10716 link->link.dealloc = &bpf_link_perf_dealloc;
10717 link->perf_event_fd = pfd;
63f2f5ee 10718
f8b299bc
MD
10719 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10720 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
47faff37
AN
10721 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10722 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10723
10724 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
668ace0e
AN
10725 if (link_fd < 0) {
10726 err = -errno;
10727 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10728 prog->name, pfd,
10729 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10730 goto err_out;
10731 }
10732 link->link.fd = link_fd;
10733 } else {
47faff37
AN
10734 if (OPTS_GET(opts, bpf_cookie, 0)) {
10735 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10736 err = -EOPNOTSUPP;
10737 goto err_out;
10738 }
10739
668ace0e
AN
10740 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10741 err = -errno;
10742 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10743 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10744 if (err == -EPROTO)
10745 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10746 prog->name, pfd);
10747 goto err_out;
10748 }
10749 link->link.fd = pfd;
63f2f5ee
AN
10750 }
10751 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10752 err = -errno;
668ace0e 10753 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
52109584 10754 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
668ace0e 10755 goto err_out;
63f2f5ee 10756 }
668ace0e
AN
10757
10758 return &link->link;
10759err_out:
10760 if (link_fd >= 0)
10761 close(link_fd);
10762 free(link);
10763 return libbpf_err_ptr(err);
63f2f5ee
AN
10764}
10765
942025c9 10766struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
47faff37
AN
10767{
10768 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10769}
10770
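/* Usage sketch (illustrative, not part of the library source): opening a
 * software CPU-clock event on CPU 0 and attaching a perf_event-type BPF
 * program to it.
 */
struct perf_event_attr attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CPU_CLOCK,
	.size = sizeof(attr),
	.freq = 1,
	.sample_freq = 99,
};
int pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		  -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
struct bpf_link *link = pfd >= 0 ?
	bpf_program__attach_perf_event(prog, pfd) : NULL;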
b2650027
AN
10771/*
10772 * This function is expected to parse an integer in the range of [0, 2^31-1]
10773 * from the given file using scanf format string fmt. If the actual parsed
10774 * value is negative, the result might be indistinguishable from an error.
10775 */
10776static int parse_uint_from_file(const char *file, const char *fmt)
10777{
10778 char buf[STRERR_BUFSIZE];
10779 int err, ret;
10780 FILE *f;
10781
59842c54 10782 f = fopen(file, "re");
b2650027
AN
10783 if (!f) {
10784 err = -errno;
10785 pr_debug("failed to open '%s': %s\n", file,
10786 libbpf_strerror_r(err, buf, sizeof(buf)));
10787 return err;
10788 }
10789 err = fscanf(f, fmt, &ret);
10790 if (err != 1) {
10791 err = err == EOF ? -EIO : -errno;
10792 pr_debug("failed to parse '%s': %s\n", file,
10793 libbpf_strerror_r(err, buf, sizeof(buf)));
10794 fclose(f);
10795 return err;
10796 }
10797 fclose(f);
10798 return ret;
10799}
10800
10801static int determine_kprobe_perf_type(void)
10802{
10803 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10804
10805 return parse_uint_from_file(file, "%d\n");
10806}
10807
10808static int determine_uprobe_perf_type(void)
10809{
10810 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10811
10812 return parse_uint_from_file(file, "%d\n");
10813}
10814
10815static int determine_kprobe_retprobe_bit(void)
10816{
10817 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10818
10819 return parse_uint_from_file(file, "config:%d\n");
10820}
10821
10822static int determine_uprobe_retprobe_bit(void)
10823{
10824 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10825
10826 return parse_uint_from_file(file, "config:%d\n");
10827}
10828
5e3b8356
AN
10829#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10830#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10831
b2650027 10832static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5e3b8356 10833 uint64_t offset, int pid, size_t ref_ctr_off)
b2650027 10834{
813847a3
AN
10835 const size_t attr_sz = sizeof(struct perf_event_attr);
10836 struct perf_event_attr attr;
b2650027 10837 char errmsg[STRERR_BUFSIZE];
708ac5be 10838 int type, pfd;
b2650027 10839
1520e846 10840 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
5e3b8356
AN
10841 return -EINVAL;
10842
813847a3
AN
10843 memset(&attr, 0, attr_sz);
10844
b2650027
AN
10845 type = uprobe ? determine_uprobe_perf_type()
10846 : determine_kprobe_perf_type();
10847 if (type < 0) {
be18010e
KW
10848 pr_warn("failed to determine %s perf type: %s\n",
10849 uprobe ? "uprobe" : "kprobe",
10850 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
b2650027
AN
10851 return type;
10852 }
10853 if (retprobe) {
10854 int bit = uprobe ? determine_uprobe_retprobe_bit()
10855 : determine_kprobe_retprobe_bit();
10856
10857 if (bit < 0) {
be18010e
KW
10858 pr_warn("failed to determine %s retprobe bit: %s\n",
10859 uprobe ? "uprobe" : "kprobe",
10860 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
b2650027
AN
10861 return bit;
10862 }
10863 attr.config |= 1 << bit;
10864 }
813847a3 10865 attr.size = attr_sz;
b2650027 10866 attr.type = type;
5e3b8356 10867 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
36db2a94
AN
10868 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10869 attr.config2 = offset; /* kprobe_addr or probe_offset */
b2650027
AN
10870
10871 /* pid filter is meaningful only for uprobes */
10872 pfd = syscall(__NR_perf_event_open, &attr,
10873 pid < 0 ? -1 : pid /* pid */,
10874 pid == -1 ? 0 : -1 /* cpu */,
10875 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
708ac5be 10876 return pfd >= 0 ? pfd : -errno;
b2650027
AN
10877}
10878
46ed5fc3
AN
10879static int append_to_file(const char *file, const char *fmt, ...)
10880{
10881 int fd, n, err = 0;
10882 va_list ap;
01dc26c9
LP
10883 char buf[1024];
10884
10885 va_start(ap, fmt);
10886 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10887 va_end(ap);
10888
10889 if (n < 0 || n >= sizeof(buf))
10890 return -EINVAL;
46ed5fc3 10891
92274e24 10892 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
46ed5fc3
AN
10893 if (fd < 0)
10894 return -errno;
10895
01dc26c9 10896 if (write(fd, buf, n) < 0)
46ed5fc3
AN
10897 err = -errno;
10898
10899 close(fd);
10900 return err;
10901}
10902
a1ac9fd6
AN
10903#define DEBUGFS "/sys/kernel/debug/tracing"
10904#define TRACEFS "/sys/kernel/tracing"
10905
10906static bool use_debugfs(void)
10907{
10908 static int has_debugfs = -1;
10909
10910 if (has_debugfs < 0)
6a4ab886 10911 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
a1ac9fd6
AN
10912
10913 return has_debugfs == 1;
10914}
10915
10916static const char *tracefs_path(void)
10917{
10918 return use_debugfs() ? DEBUGFS : TRACEFS;
10919}
10920
10921static const char *tracefs_kprobe_events(void)
10922{
10923 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10924}
10925
10926static const char *tracefs_uprobe_events(void)
10927{
10928 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10929}
10930
8a3fe76f
JL
10931static const char *tracefs_available_filter_functions(void)
10932{
10933 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10934 : TRACEFS"/available_filter_functions";
10935}
10936
56baeeba
JL
10937static const char *tracefs_available_filter_functions_addrs(void)
10938{
10939 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10940 : TRACEFS"/available_filter_functions_addrs";
10941}
10942
46ed5fc3
AN
10943static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10944 const char *kfunc_name, size_t offset)
10945{
51a33c60 10946 static int index = 0;
2fa07453 10947 int i;
51a33c60
QW
10948
10949 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10950 __sync_fetch_and_add(&index, 1));
2fa07453
MD
10951
10952 /* sanitize binary_path in the probe name */
10953 for (i = 0; buf[i]; i++) {
10954 if (!isalnum(buf[i]))
10955 buf[i] = '_';
10956 }
46ed5fc3
AN
10957}
10958
10959static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10960 const char *kfunc_name, size_t offset)
10961{
a1ac9fd6 10962 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
46ed5fc3
AN
10963 retprobe ? 'r' : 'p',
10964 retprobe ? "kretprobes" : "kprobes",
10965 probe_name, kfunc_name, offset);
10966}
10967
10968static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10969{
a1ac9fd6
AN
10970 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10971 retprobe ? "kretprobes" : "kprobes", probe_name);
46ed5fc3
AN
10972}
10973
10974static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10975{
10976 char file[256];
10977
a1ac9fd6
AN
10978 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10979 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
46ed5fc3
AN
10980
10981 return parse_uint_from_file(file, "%d\n");
10982}
10983
10984static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10985 const char *kfunc_name, size_t offset, int pid)
ca304b40 10986{
813847a3
AN
10987 const size_t attr_sz = sizeof(struct perf_event_attr);
10988 struct perf_event_attr attr;
ca304b40
RDT
10989 char errmsg[STRERR_BUFSIZE];
10990 int type, pfd, err;
10991
46ed5fc3 10992 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
ca304b40 10993 if (err < 0) {
46ed5fc3
AN
10994 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10995 kfunc_name, offset,
ca304b40
RDT
10996 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10997 return err;
10998 }
46ed5fc3 10999 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
ca304b40 11000 if (type < 0) {
80940293 11001 err = type;
46ed5fc3
AN
11002 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
11003 kfunc_name, offset,
80940293
CW
11004 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11005 goto err_clean_legacy;
ca304b40 11006 }
813847a3
AN
11007
11008 memset(&attr, 0, attr_sz);
11009 attr.size = attr_sz;
ca304b40
RDT
11010 attr.config = type;
11011 attr.type = PERF_TYPE_TRACEPOINT;
11012
11013 pfd = syscall(__NR_perf_event_open, &attr,
11014 pid < 0 ? -1 : pid, /* pid */
11015 pid == -1 ? 0 : -1, /* cpu */
11016 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11017 if (pfd < 0) {
11018 err = -errno;
11019 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
11020 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
80940293 11021 goto err_clean_legacy;
ca304b40
RDT
11022 }
11023 return pfd;
80940293
CW
11024
11025err_clean_legacy:
11026 /* Clear the newly added legacy kprobe_event */
11027 remove_kprobe_event_legacy(probe_name, retprobe);
11028 return err;
ca304b40
RDT
11029}
11030
708ac5be
AN
11031static const char *arch_specific_syscall_pfx(void)
11032{
11033#if defined(__x86_64__)
11034 return "x64";
11035#elif defined(__i386__)
11036 return "ia32";
11037#elif defined(__s390x__)
11038 return "s390x";
11039#elif defined(__s390__)
11040 return "s390";
11041#elif defined(__arm__)
11042 return "arm";
11043#elif defined(__aarch64__)
11044 return "arm64";
11045#elif defined(__mips__)
11046 return "mips";
11047#elif defined(__riscv)
11048 return "riscv";
64893e83
DM
11049#elif defined(__powerpc__)
11050 return "powerpc";
11051#elif defined(__powerpc64__)
11052 return "powerpc64";
708ac5be
AN
11053#else
11054 return NULL;
11055#endif
11056}
11057
f3dcee93 11058int probe_kern_syscall_wrapper(int token_fd)
708ac5be
AN
11059{
11060 char syscall_name[64];
11061 const char *ksys_pfx;
11062
11063 ksys_pfx = arch_specific_syscall_pfx();
11064 if (!ksys_pfx)
11065 return 0;
11066
11067 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
11068
11069 if (determine_kprobe_perf_type() >= 0) {
11070 int pfd;
11071
11072 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
11073 if (pfd >= 0)
11074 close(pfd);
11075
11076 return pfd >= 0 ? 1 : 0;
11077 } else { /* legacy mode */
11078 char probe_name[128];
11079
11080 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
11081 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
11082 return 0;
11083
11084 (void)remove_kprobe_event_legacy(probe_name, false);
11085 return 1;
11086 }
11087}
11088
da97553e 11089struct bpf_link *
942025c9 11090bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
ac0ed488 11091 const char *func_name,
47faff37 11092 const struct bpf_kprobe_opts *opts)
b2650027 11093{
47faff37 11094 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f8b299bc 11095 enum probe_attach_mode attach_mode;
b2650027 11096 char errmsg[STRERR_BUFSIZE];
ca304b40 11097 char *legacy_probe = NULL;
b2650027 11098 struct bpf_link *link;
46ed5fc3 11099 size_t offset;
ca304b40 11100 bool retprobe, legacy;
b2650027
AN
11101 int pfd, err;
11102
da97553e
JO
11103 if (!OPTS_VALID(opts, bpf_kprobe_opts))
11104 return libbpf_err_ptr(-EINVAL);
11105
f8b299bc 11106 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
da97553e
JO
11107 retprobe = OPTS_GET(opts, retprobe, false);
11108 offset = OPTS_GET(opts, offset, 0);
47faff37 11109 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
da97553e 11110
ca304b40 11111 legacy = determine_kprobe_perf_type() < 0;
f8b299bc
MD
11112 switch (attach_mode) {
11113 case PROBE_ATTACH_MODE_LEGACY:
11114 legacy = true;
11115 pe_opts.force_ioctl_attach = true;
11116 break;
11117 case PROBE_ATTACH_MODE_PERF:
11118 if (legacy)
11119 return libbpf_err_ptr(-ENOTSUP);
11120 pe_opts.force_ioctl_attach = true;
11121 break;
11122 case PROBE_ATTACH_MODE_LINK:
11123 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11124 return libbpf_err_ptr(-ENOTSUP);
11125 break;
11126 case PROBE_ATTACH_MODE_DEFAULT:
11127 break;
11128 default:
11129 return libbpf_err_ptr(-EINVAL);
11130 }
11131
ca304b40
RDT
11132 if (!legacy) {
11133 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11134 func_name, offset,
11135 -1 /* pid */, 0 /* ref_ctr_off */);
11136 } else {
46ed5fc3
AN
11137 char probe_name[256];
11138
11139 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11140 func_name, offset);
11141
71cff670 11142 legacy_probe = strdup(probe_name);
ca304b40
RDT
11143 if (!legacy_probe)
11144 return libbpf_err_ptr(-ENOMEM);
11145
46ed5fc3 11146 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
ca304b40
RDT
11147 offset, -1 /* pid */);
11148 }
b2650027 11149 if (pfd < 0) {
46ed5fc3
AN
11150 err = -errno;
11151 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11152 prog->name, retprobe ? "kretprobe" : "kprobe",
11153 func_name, offset,
303a2572
AN
11154 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11155 goto err_out;
b2650027 11156 }
47faff37 11157 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
11158 err = libbpf_get_error(link);
11159 if (err) {
b2650027 11160 close(pfd);
46ed5fc3
AN
11161 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11162 prog->name, retprobe ? "kretprobe" : "kprobe",
11163 func_name, offset,
be18010e 11164 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
80940293 11165 goto err_clean_legacy;
b2650027 11166 }
ca304b40
RDT
11167 if (legacy) {
11168 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11169
11170 perf_link->legacy_probe_name = legacy_probe;
46ed5fc3 11171 perf_link->legacy_is_kprobe = true;
ca304b40
RDT
11172 perf_link->legacy_is_retprobe = retprobe;
11173 }
11174
b2650027 11175 return link;
80940293
CW
11176
11177err_clean_legacy:
11178 if (legacy)
11179 remove_kprobe_event_legacy(legacy_probe, retprobe);
303a2572
AN
11180err_out:
11181 free(legacy_probe);
11182 return libbpf_err_ptr(err);
b2650027
AN
11183}
11184
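/* Usage sketch (illustrative): attaching at an offset within a kernel
 * function via the opts-based API; the cookie would be retrieved on the BPF
 * side with bpf_get_attach_cookie().
 */
LIBBPF_OPTS(bpf_kprobe_opts, opts,
	.offset = 0x10,
	.bpf_cookie = 0xdeadbeef,
);
struct bpf_link *link =
	bpf_program__attach_kprobe_opts(prog, "tcp_sendmsg", &opts);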
942025c9 11185struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
ac0ed488
JO
11186 bool retprobe,
11187 const char *func_name)
11188{
da97553e 11189 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
ac0ed488 11190 .retprobe = retprobe,
da97553e 11191 );
ac0ed488
JO
11192
11193 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11194}
11195
708ac5be
AN
11196struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11197 const char *syscall_name,
11198 const struct bpf_ksyscall_opts *opts)
11199{
11200 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11201 char func_name[128];
11202
11203 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11204 return libbpf_err_ptr(-EINVAL);
11205
11206 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
64893e83
DM
11207 /* arch_specific_syscall_pfx() should never return NULL here
11208 * because it is guarded by kernel_supports(). However, the
11209 * compiler does not know that, so keep an explicit fallback
11210 * conditional as well.
11211 */
708ac5be 11212 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
64893e83 11213 arch_specific_syscall_pfx() ? : "", syscall_name);
708ac5be
AN
11214 } else {
11215 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11216 }
11217
11218 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11219 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11220
11221 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11222}
11223
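/* Usage sketch (illustrative): attaching to a syscall entry without
 * hard-coding the arch-specific wrapper name; the function above resolves
 * __x64_sys_unlinkat, __arm64_sys_unlinkat, etc., as appropriate.
 */
LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = false);
struct bpf_link *link = bpf_program__attach_ksyscall(prog, "unlinkat", &opts);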
ddc6b049 11224/* Adapted from perf/util/string.c */
e613d1d0 11225bool glob_match(const char *str, const char *pat)
ddc6b049
JO
11226{
11227 while (*str && *pat && *pat != '*') {
11228 if (*pat == '?') { /* Matches any single character */
11229 str++;
11230 pat++;
11231 continue;
11232 }
11233 if (*str != *pat)
11234 return false;
11235 str++;
11236 pat++;
11237 }
11238 /* Check wild card */
11239 if (*pat == '*') {
11240 while (*pat == '*')
11241 pat++;
11242 if (!*pat) /* Tail wild card matches all */
11243 return true;
11244 while (*str)
11245 if (glob_match(str++, pat))
11246 return true;
11247 }
11248 return !*str && !*pat;
11249}
11250
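/* Behavior sketch (editor's illustration) of the matcher above:
 *
 *   glob_match("tcp_v4_connect", "tcp_*")          -> true
 *   glob_match("tcp_v6_connect", "tcp_v?_connect") -> true
 *   glob_match("udp_sendmsg",    "tcp_*")          -> false
 */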
11251struct kprobe_multi_resolve {
11252 const char *pattern;
11253 unsigned long *addrs;
11254 size_t cap;
11255 size_t cnt;
11256};
11257
8a3fe76f
JL
11258struct avail_kallsyms_data {
11259 char **syms;
11260 size_t cnt;
11261 struct kprobe_multi_resolve *res;
11262};
11263
11264static int avail_func_cmp(const void *a, const void *b)
11265{
11266 return strcmp(*(const char **)a, *(const char **)b);
11267}
11268
11269static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11270 const char *sym_name, void *ctx)
ddc6b049 11271{
8a3fe76f
JL
11272 struct avail_kallsyms_data *data = ctx;
11273 struct kprobe_multi_resolve *res = data->res;
ddc6b049
JO
11274 int err;
11275
8a3fe76f 11276 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
ddc6b049
JO
11277 return 0;
11278
8a3fe76f 11279 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
ddc6b049
JO
11280 if (err)
11281 return err;
11282
8a3fe76f 11283 res->addrs[res->cnt++] = (unsigned long)sym_addr;
ddc6b049
JO
11284 return 0;
11285}
11286
8a3fe76f
JL
11287static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11288{
11289 const char *available_functions_file = tracefs_available_filter_functions();
11290 struct avail_kallsyms_data data;
11291 char sym_name[500];
11292 FILE *f;
11293 int err = 0, ret, i;
11294 char **syms = NULL;
11295 size_t cap = 0, cnt = 0;
11296
11297 f = fopen(available_functions_file, "re");
11298 if (!f) {
11299 err = -errno;
11300 pr_warn("failed to open %s: %d\n", available_functions_file, err);
11301 return err;
11302 }
11303
11304 while (true) {
11305 char *name;
11306
11307 ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11308 if (ret == EOF && feof(f))
11309 break;
11310
11311 if (ret != 1) {
11312 pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11313 err = -EINVAL;
11314 goto cleanup;
11315 }
11316
11317 if (!glob_match(sym_name, res->pattern))
11318 continue;
11319
11320 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11321 if (err)
11322 goto cleanup;
11323
11324 name = strdup(sym_name);
11325 if (!name) {
11326 err = -errno;
11327 goto cleanup;
11328 }
11329
11330 syms[cnt++] = name;
11331 }
11332
11333 /* no entries found, bail out */
11334 if (cnt == 0) {
11335 err = -ENOENT;
11336 goto cleanup;
11337 }
11338
11339 /* sort available functions */
11340 qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11341
11342 data.syms = syms;
11343 data.res = res;
11344 data.cnt = cnt;
11345 libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11346
11347 if (res->cnt == 0)
11348 err = -ENOENT;
11349
11350cleanup:
11351 for (i = 0; i < cnt; i++)
11352 free((char *)syms[i]);
11353 free(syms);
11354
11355 fclose(f);
11356 return err;
11357}
11358
56baeeba
JL
11359static bool has_available_filter_functions_addrs(void)
11360{
11361 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11362}
11363
11364static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11365{
11366 const char *available_path = tracefs_available_filter_functions_addrs();
11367 char sym_name[500];
11368 FILE *f;
11369 int ret, err = 0;
11370 unsigned long long sym_addr;
11371
11372 f = fopen(available_path, "re");
11373 if (!f) {
11374 err = -errno;
11375 pr_warn("failed to open %s: %d\n", available_path, err);
11376 return err;
11377 }
11378
11379 while (true) {
11380 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11381 if (ret == EOF && feof(f))
11382 break;
11383
11384 if (ret != 2) {
11385 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11386 ret);
11387 err = -EINVAL;
11388 goto cleanup;
11389 }
11390
11391 if (!glob_match(sym_name, res->pattern))
11392 continue;
11393
11394 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11395 sizeof(*res->addrs), res->cnt + 1);
11396 if (err)
11397 goto cleanup;
11398
11399 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11400 }
11401
11402 if (res->cnt == 0)
11403 err = -ENOENT;
11404
11405cleanup:
11406 fclose(f);
11407 return err;
11408}
11409
ddc6b049
JO
11410struct bpf_link *
11411bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11412 const char *pattern,
11413 const struct bpf_kprobe_multi_opts *opts)
11414{
11415 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11416 struct kprobe_multi_resolve res = {
11417 .pattern = pattern,
11418 };
2ca178f0 11419 enum bpf_attach_type attach_type;
ddc6b049
JO
11420 struct bpf_link *link = NULL;
11421 char errmsg[STRERR_BUFSIZE];
11422 const unsigned long *addrs;
11423 int err, link_fd, prog_fd;
2ca178f0 11424 bool retprobe, session;
ddc6b049
JO
11425 const __u64 *cookies;
11426 const char **syms;
ddc6b049
JO
11427 size_t cnt;
11428
11429 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11430 return libbpf_err_ptr(-EINVAL);
11431
7b30c296
MY
11432 prog_fd = bpf_program__fd(prog);
11433 if (prog_fd < 0) {
11434 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11435 prog->name);
11436 return libbpf_err_ptr(-EINVAL);
11437 }
11438
ddc6b049
JO
11439 syms = OPTS_GET(opts, syms, false);
11440 addrs = OPTS_GET(opts, addrs, false);
11441 cnt = OPTS_GET(opts, cnt, false);
11442 cookies = OPTS_GET(opts, cookies, false);
11443
11444 if (!pattern && !addrs && !syms)
11445 return libbpf_err_ptr(-EINVAL);
11446 if (pattern && (addrs || syms || cookies || cnt))
11447 return libbpf_err_ptr(-EINVAL);
11448 if (!pattern && !cnt)
11449 return libbpf_err_ptr(-EINVAL);
11450 if (addrs && syms)
11451 return libbpf_err_ptr(-EINVAL);
11452
11453 if (pattern) {
56baeeba
JL
11454 if (has_available_filter_functions_addrs())
11455 err = libbpf_available_kprobes_parse(&res);
11456 else
11457 err = libbpf_available_kallsyms_parse(&res);
ddc6b049
JO
11458 if (err)
11459 goto error;
ddc6b049
JO
11460 addrs = res.addrs;
11461 cnt = res.cnt;
11462 }
11463
11464 retprobe = OPTS_GET(opts, retprobe, false);
2ca178f0
JO
11465 session = OPTS_GET(opts, session, false);
11466
11467 if (retprobe && session)
11468 return libbpf_err_ptr(-EINVAL);
11469
11470 attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI;
ddc6b049
JO
11471
11472 lopts.kprobe_multi.syms = syms;
11473 lopts.kprobe_multi.addrs = addrs;
11474 lopts.kprobe_multi.cookies = cookies;
11475 lopts.kprobe_multi.cnt = cnt;
11476 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11477
11478 link = calloc(1, sizeof(*link));
11479 if (!link) {
11480 err = -ENOMEM;
11481 goto error;
11482 }
11483 link->detach = &bpf_link__detach_fd;
11484
2ca178f0 11485 link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
ddc6b049
JO
11486 if (link_fd < 0) {
11487 err = -errno;
11488 pr_warn("prog '%s': failed to attach: %s\n",
11489 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11490 goto error;
11491 }
11492 link->fd = link_fd;
11493 free(res.addrs);
11494 return link;
11495
11496error:
11497 free(link);
11498 free(res.addrs);
11499 return libbpf_err_ptr(err);
11500}
11501
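/* Usage sketch (illustrative): attaching one program to every kernel
 * function matching a glob pattern, resolved through the
 * available_filter_functions parsers above.
 */
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct bpf_link *link =
	bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);

if (!link)
	fprintf(stderr, "multi-attach failed: %d\n", -errno);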
4fa5bcfe 11502static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 11503{
da97553e 11504 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
a2488b5f 11505 unsigned long offset = 0;
d7a18ea7 11506 const char *func_name;
a2488b5f 11507 char *func;
4fa5bcfe 11508 int n;
d7a18ea7 11509
9af8efc4
AN
11510 *link = NULL;
11511
11512 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11513 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11514 return 0;
11515
13d35a0c
AN
11516 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11517 if (opts.retprobe)
11518 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11519 else
11520 func_name = prog->sec_name + sizeof("kprobe/") - 1;
d7a18ea7 11521
e3f9bc35 11522 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
a2488b5f 11523 if (n < 1) {
a2488b5f 11524 pr_warn("kprobe name is invalid: %s\n", func_name);
4fa5bcfe 11525 return -EINVAL;
a2488b5f
AM
11526 }
11527 if (opts.retprobe && offset != 0) {
1f71a468 11528 free(func);
a2488b5f 11529 pr_warn("kretprobes do not support offset specification\n");
4fa5bcfe 11530 return -EINVAL;
a2488b5f 11531 }
d7a18ea7 11532
a2488b5f 11533 opts.offset = offset;
4fa5bcfe 11534 *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
a2488b5f 11535 free(func);
4fa5bcfe 11536 return libbpf_get_error(*link);
d7a18ea7
AN
11537}
11538
708ac5be
AN
11539static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11540{
11541 LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11542 const char *syscall_name;
11543
11544 *link = NULL;
11545
11546 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11547 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11548 return 0;
11549
11550 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11551 if (opts.retprobe)
11552 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11553 else
11554 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11555
11556 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11557 return *link ? 0 : -errno;
11558}
11559
ddc6b049
JO
11560static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11561{
11562 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11563 const char *spec;
11564 char *pattern;
11565 int n;
11566
9af8efc4
AN
11567 *link = NULL;
11568
11569 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11570 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11571 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11572 return 0;
11573
ddc6b049
JO
11574 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11575 if (opts.retprobe)
11576 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11577 else
11578 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11579
11580 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11581 if (n < 1) {
11582 pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
11583 return -EINVAL;
11584 }
11585
11586 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11587 free(pattern);
11588 return libbpf_get_error(*link);
11589}
11590
2ca178f0
JO
11591static int attach_kprobe_session(const struct bpf_program *prog, long cookie,
11592 struct bpf_link **link)
11593{
11594 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true);
11595 const char *spec;
11596 char *pattern;
11597 int n;
11598
11599 *link = NULL;
11600
11601 /* no auto-attach for SEC("kprobe.session") */
11602 if (strcmp(prog->sec_name, "kprobe.session") == 0)
11603 return 0;
11604
11605 spec = prog->sec_name + sizeof("kprobe.session/") - 1;
11606 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11607 if (n < 1) {
11608 pr_warn("kprobe session pattern is invalid: %s\n", pattern);
11609 return -EINVAL;
11610 }
11611
11612 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11613 free(pattern);
11614 return *link ? 0 : -errno;
11615}
11616
5bfdd32d
JO
11617static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11618{
11619 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11620 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11621 int n, ret = -EINVAL;
11622
11623 *link = NULL;
11624
2147c8d0 11625 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
5bfdd32d
JO
11626 &probe_type, &binary_path, &func_name);
11627 switch (n) {
11628 case 1:
11629 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11630 ret = 0;
11631 break;
11632 case 3:
11633 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11634 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11635 ret = libbpf_get_error(*link);
11636 break;
11637 default:
11638 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11639 prog->sec_name);
11640 break;
11641 }
11642 free(probe_type);
11643 free(binary_path);
11644 free(func_name);
11645 return ret;
11646}
11647
cc10623c
AN
11648static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11649 const char *binary_path, uint64_t offset)
11650{
11651 int i;
11652
11653 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11654
11655 /* sanitize binary_path in the probe name */
11656 for (i = 0; buf[i]; i++) {
11657 if (!isalnum(buf[i]))
11658 buf[i] = '_';
11659 }
11660}
11661
11662static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11663 const char *binary_path, size_t offset)
11664{
a1ac9fd6 11665 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
cc10623c
AN
11666 retprobe ? 'r' : 'p',
11667 retprobe ? "uretprobes" : "uprobes",
11668 probe_name, binary_path, offset);
11669}
11670
11671static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11672{
a1ac9fd6
AN
11673 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11674 retprobe ? "uretprobes" : "uprobes", probe_name);
cc10623c
AN
11675}
11676
11677static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11678{
11679 char file[512];
11680
a1ac9fd6
AN
11681 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11682 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
cc10623c
AN
11683
11684 return parse_uint_from_file(file, "%d\n");
11685}
11686
11687static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11688 const char *binary_path, size_t offset, int pid)
11689{
813847a3 11690 const size_t attr_sz = sizeof(struct perf_event_attr);
cc10623c
AN
11691 struct perf_event_attr attr;
11692 int type, pfd, err;
11693
11694 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11695 if (err < 0) {
11696 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11697 binary_path, (size_t)offset, err);
11698 return err;
11699 }
11700 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11701 if (type < 0) {
2655144f 11702 err = type;
cc10623c 11703 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
2655144f
CW
11704 binary_path, offset, err);
11705 goto err_clean_legacy;
cc10623c
AN
11706 }
11707
813847a3
AN
11708 memset(&attr, 0, attr_sz);
11709 attr.size = attr_sz;
cc10623c
AN
11710 attr.config = type;
11711 attr.type = PERF_TYPE_TRACEPOINT;
11712
11713 pfd = syscall(__NR_perf_event_open, &attr,
11714 pid < 0 ? -1 : pid, /* pid */
11715 pid == -1 ? 0 : -1, /* cpu */
11716 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11717 if (pfd < 0) {
11718 err = -errno;
11719 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
2655144f 11720 goto err_clean_legacy;
cc10623c
AN
11721 }
11722 return pfd;
2655144f
CW
11723
11724err_clean_legacy:
11725 /* Clear the newly added legacy uprobe_event */
11726 remove_uprobe_event_legacy(probe_name, retprobe);
11727 return err;
cc10623c
AN
11728}
11729
c44fd845
DM
11730/* Find offset of function name in archive specified by path. Currently
11731 * supported are .zip files that do not compress their contents, as used on
11732 * Android in the form of APKs, for example. "file_name" is the name of the ELF
11733 * file inside the archive. "func_name" matches symbol name or name@@LIB for
11734 * library functions.
11735 *
11736 * An overview of the APK format is provided here:
11737 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11738 */
11739static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11740 const char *func_name)
11741{
11742 struct zip_archive *archive;
11743 struct zip_entry entry;
11744 long ret;
11745 Elf *elf;
11746
11747 archive = zip_archive_open(archive_path);
11748 if (IS_ERR(archive)) {
11749 ret = PTR_ERR(archive);
11750 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11751 return ret;
11752 }
11753
11754 ret = zip_archive_find_entry(archive, file_name, &entry);
11755 if (ret) {
11756 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11757 archive_path, ret);
11758 goto out;
11759 }
11760 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11761 (unsigned long)entry.data_offset);
11762
11763 if (entry.compression) {
11764 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11765 archive_path);
11766 ret = -LIBBPF_ERRNO__FORMAT;
11767 goto out;
11768 }
11769
11770 elf = elf_memory((void *)entry.data, entry.data_length);
11771 if (!elf) {
11772 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
11773 elf_errmsg(-1));
11774 ret = -LIBBPF_ERRNO__LIBELF;
11775 goto out;
11776 }
11777
11778 ret = elf_find_func_offset(elf, file_name, func_name);
11779 if (ret > 0) {
11780 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11781 func_name, file_name, archive_path, entry.data_offset, ret,
11782 ret + entry.data_offset);
11783 ret += entry.data_offset;
11784 }
11785 elf_end(elf);
11786
11787out:
11788 zip_archive_close(archive);
11789 return ret;
11790}
11791
56818931
IL
11792static const char *arch_specific_lib_paths(void)
11793{
11794 /*
11795 * Based on https://packages.debian.org/sid/libc6.
11796 *
11797 * Assume that the traced program is built for the same architecture
11798 * as libbpf, which should cover the vast majority of cases.
11799 */
11800#if defined(__x86_64__)
11801 return "/lib/x86_64-linux-gnu";
11802#elif defined(__i386__)
11803 return "/lib/i386-linux-gnu";
11804#elif defined(__s390x__)
11805 return "/lib/s390x-linux-gnu";
11806#elif defined(__s390__)
11807 return "/lib/s390-linux-gnu";
11808#elif defined(__arm__) && defined(__SOFTFP__)
11809 return "/lib/arm-linux-gnueabi";
11810#elif defined(__arm__) && !defined(__SOFTFP__)
11811 return "/lib/arm-linux-gnueabihf";
11812#elif defined(__aarch64__)
11813 return "/lib/aarch64-linux-gnu";
11814#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11815 return "/lib/mips64el-linux-gnuabi64";
11816#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11817 return "/lib/mipsel-linux-gnu";
11818#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11819 return "/lib/powerpc64le-linux-gnu";
11820#elif defined(__sparc__) && defined(__arch64__)
11821 return "/lib/sparc64-linux-gnu";
11822#elif defined(__riscv) && __riscv_xlen == 64
11823 return "/lib/riscv64-linux-gnu";
11824#else
11825 return NULL;
11826#endif
11827}
11828
1ce3a60e
AM
11829/* Get full path to program/shared library. */
11830static int resolve_full_path(const char *file, char *result, size_t result_sz)
11831{
56818931 11832 const char *search_paths[3] = {};
9e32084e 11833 int i, perm;
1ce3a60e 11834
a1c9d61b 11835 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
1ce3a60e
AM
11836 search_paths[0] = getenv("LD_LIBRARY_PATH");
11837 search_paths[1] = "/usr/lib64:/usr/lib";
56818931 11838 search_paths[2] = arch_specific_lib_paths();
9e32084e 11839 perm = R_OK;
1ce3a60e
AM
11840 } else {
11841 search_paths[0] = getenv("PATH");
11842 search_paths[1] = "/usr/bin:/usr/sbin";
9e32084e 11843 perm = R_OK | X_OK;
1ce3a60e
AM
11844 }
11845
11846 for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11847 const char *s;
11848
11849 if (!search_paths[i])
11850 continue;
11851 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11852 char *next_path;
11853 int seg_len;
11854
11855 if (s[0] == ':')
11856 s++;
11857 next_path = strchr(s, ':');
11858 seg_len = next_path ? next_path - s : strlen(s);
11859 if (!seg_len)
11860 continue;
11861 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
9e32084e 11862 /* ensure it has required permissions */
6a4ab886 11863 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
1ce3a60e
AM
11864 continue;
11865 pr_debug("resolved '%s' to '%s'\n", file, result);
11866 return 0;
11867 }
11868 }
11869 return -ENOENT;
11870}
11871
3140cf12
JO
11872struct bpf_link *
11873bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11874 pid_t pid,
11875 const char *path,
11876 const char *func_pattern,
11877 const struct bpf_uprobe_multi_opts *opts)
11878{
11879 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11880 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11881 unsigned long *resolved_offsets = NULL;
11882 int err = 0, link_fd, prog_fd;
11883 struct bpf_link *link = NULL;
11884 char errmsg[STRERR_BUFSIZE];
11885 char full_path[PATH_MAX];
11886 const __u64 *cookies;
11887 const char **syms;
11888 size_t cnt;
11889
11890 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11891 return libbpf_err_ptr(-EINVAL);
11892
7b30c296
MY
11893 prog_fd = bpf_program__fd(prog);
11894 if (prog_fd < 0) {
11895 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11896 prog->name);
11897 return libbpf_err_ptr(-EINVAL);
11898 }
11899
3140cf12
JO
11900 syms = OPTS_GET(opts, syms, NULL);
11901 offsets = OPTS_GET(opts, offsets, NULL);
11902 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11903 cookies = OPTS_GET(opts, cookies, NULL);
11904 cnt = OPTS_GET(opts, cnt, 0);
11905
11906 /*
11907 * The user can specify two mutually exclusive sets of inputs:
11908 *
11909 * 1) use only path/func_pattern/pid arguments
11910 *
11911 * 2) use path/pid with allowed combinations of:
11912 * syms/offsets/ref_ctr_offsets/cookies/cnt
11913 *
11914 * - syms and offsets are mutually exclusive
11915 * - ref_ctr_offsets and cookies are optional
11916 *
11917 * Any other usage results in error.
11918 */
11919
11920 if (!path)
11921 return libbpf_err_ptr(-EINVAL);
11922 if (!func_pattern && cnt == 0)
11923 return libbpf_err_ptr(-EINVAL);
11924
11925 if (func_pattern) {
11926 if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11927 return libbpf_err_ptr(-EINVAL);
11928 } else {
11929 if (!!syms == !!offsets)
11930 return libbpf_err_ptr(-EINVAL);
11931 }
11932
11933 if (func_pattern) {
11934 if (!strchr(path, '/')) {
11935 err = resolve_full_path(path, full_path, sizeof(full_path));
11936 if (err) {
11937 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11938 prog->name, path, err);
11939 return libbpf_err_ptr(err);
11940 }
11941 path = full_path;
11942 }
11943
11944 err = elf_resolve_pattern_offsets(path, func_pattern,
11945 &resolved_offsets, &cnt);
11946 if (err < 0)
11947 return libbpf_err_ptr(err);
11948 offsets = resolved_offsets;
11949 } else if (syms) {
48f0dfd8 11950 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
3140cf12
JO
11951 if (err < 0)
11952 return libbpf_err_ptr(err);
11953 offsets = resolved_offsets;
11954 }
11955
11956 lopts.uprobe_multi.path = path;
11957 lopts.uprobe_multi.offsets = offsets;
11958 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11959 lopts.uprobe_multi.cookies = cookies;
11960 lopts.uprobe_multi.cnt = cnt;
11961 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11962
11963 if (pid == 0)
11964 pid = getpid();
11965 if (pid > 0)
11966 lopts.uprobe_multi.pid = pid;
11967
11968 link = calloc(1, sizeof(*link));
11969 if (!link) {
11970 err = -ENOMEM;
11971 goto error;
11972 }
11973 link->detach = &bpf_link__detach_fd;
11974
3140cf12
JO
11975 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11976 if (link_fd < 0) {
11977 err = -errno;
11978 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11979 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11980 goto error;
11981 }
11982 link->fd = link_fd;
11983 free(resolved_offsets);
11984 return link;
11985
11986error:
11987 free(resolved_offsets);
11988 free(link);
11989 return libbpf_err_ptr(err);
11990}
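/* Usage sketch for the two input modes validated above; the binary path,
 * symbol names, and cookie values are assumptions. Pattern mode would pass
 * func_pattern instead; symbol mode pairs each sym with an optional cookie.
 */
static struct bpf_link *attach_multi_example(struct bpf_program *prog)
{
	const char *syms[] = { "handle_request", "handle_response" };
	__u64 cookies[] = { 1, 2 };
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
	);

	return bpf_program__attach_uprobe_multi(prog, -1 /* all processes */,
						"/usr/bin/myapp",
						NULL /* func_pattern */, &opts);
}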
11991
47faff37 11992LIBBPF_API struct bpf_link *
942025c9 11993bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
47faff37
AN
11994 const char *binary_path, size_t func_offset,
11995 const struct bpf_uprobe_opts *opts)
b2650027 11996{
c44fd845 11997 const char *archive_path = NULL, *archive_sep = NULL;
cc10623c 11998 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
c44fd845 11999 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f8b299bc 12000 enum probe_attach_mode attach_mode;
c44fd845 12001 char full_path[PATH_MAX];
b2650027 12002 struct bpf_link *link;
5e3b8356 12003 size_t ref_ctr_off;
b2650027 12004 int pfd, err;
cc10623c 12005 bool retprobe, legacy;
433966e3 12006 const char *func_name;
47faff37
AN
12007
12008 if (!OPTS_VALID(opts, bpf_uprobe_opts))
12009 return libbpf_err_ptr(-EINVAL);
12010
f8b299bc 12011 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
47faff37 12012 retprobe = OPTS_GET(opts, retprobe, false);
5e3b8356 12013 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
47faff37 12014 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
b2650027 12015
8ed2f5a6
HC
12016 if (!binary_path)
12017 return libbpf_err_ptr(-EINVAL);
12018
c44fd845
DM
12019 /* Check if "binary_path" refers to an archive. */
12020 archive_sep = strstr(binary_path, "!/");
12021 if (archive_sep) {
12022 full_path[0] = '\0';
12023 libbpf_strlcpy(full_path, binary_path,
12024 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
12025 archive_path = full_path;
12026 binary_path = archive_sep + 2;
12027 } else if (!strchr(binary_path, '/')) {
12028 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
1ce3a60e 12029 if (err) {
2e4913e0
AN
12030 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12031 prog->name, binary_path, err);
1ce3a60e
AM
12032 return libbpf_err_ptr(err);
12033 }
c44fd845 12034 binary_path = full_path;
1ce3a60e 12035 }
433966e3
AM
12036 func_name = OPTS_GET(opts, func_name, NULL);
12037 if (func_name) {
12038 long sym_off;
12039
c44fd845
DM
12040 if (archive_path) {
12041 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
12042 func_name);
12043 binary_path = archive_path;
12044 } else {
12045 sym_off = elf_find_func_offset_from_file(binary_path, func_name);
12046 }
433966e3
AM
12047 if (sym_off < 0)
12048 return libbpf_err_ptr(sym_off);
12049 func_offset += sym_off;
12050 }
1ce3a60e 12051
cc10623c 12052 legacy = determine_uprobe_perf_type() < 0;
f8b299bc
MD
12053 switch (attach_mode) {
12054 case PROBE_ATTACH_MODE_LEGACY:
12055 legacy = true;
12056 pe_opts.force_ioctl_attach = true;
12057 break;
12058 case PROBE_ATTACH_MODE_PERF:
12059 if (legacy)
12060 return libbpf_err_ptr(-ENOTSUP);
12061 pe_opts.force_ioctl_attach = true;
12062 break;
12063 case PROBE_ATTACH_MODE_LINK:
12064 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12065 return libbpf_err_ptr(-ENOTSUP);
12066 break;
12067 case PROBE_ATTACH_MODE_DEFAULT:
12068 break;
12069 default:
12070 return libbpf_err_ptr(-EINVAL);
12071 }
12072
cc10623c
AN
12073 if (!legacy) {
12074 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
12075 func_offset, pid, ref_ctr_off);
12076 } else {
1ce3a60e 12077 char probe_name[PATH_MAX + 64];
cc10623c
AN
12078
12079 if (ref_ctr_off)
12080 return libbpf_err_ptr(-EINVAL);
12081
12082 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
12083 binary_path, func_offset);
12084
12085 legacy_probe = strdup(probe_name);
12086 if (!legacy_probe)
12087 return libbpf_err_ptr(-ENOMEM);
12088
12089 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
12090 binary_path, func_offset, pid);
12091 }
b2650027 12092 if (pfd < 0) {
cc10623c 12093 err = -errno;
52109584
AN
12094 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
12095 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e 12096 binary_path, func_offset,
cc10623c
AN
12097 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12098 goto err_out;
b2650027 12099 }
cc10623c 12100
47faff37 12101 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
12102 err = libbpf_get_error(link);
12103 if (err) {
b2650027 12104 close(pfd);
52109584
AN
12105 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
12106 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e
KW
12107 binary_path, func_offset,
12108 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
2655144f 12109 goto err_clean_legacy;
cc10623c
AN
12110 }
12111 if (legacy) {
12112 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
12113
12114 perf_link->legacy_probe_name = legacy_probe;
12115 perf_link->legacy_is_kprobe = false;
12116 perf_link->legacy_is_retprobe = retprobe;
b2650027
AN
12117 }
12118 return link;
2655144f
CW
12119
12120err_clean_legacy:
12121 if (legacy)
12122 remove_uprobe_event_legacy(legacy_probe, retprobe);
cc10623c
AN
12123err_out:
12124 free(legacy_probe);
12125 return libbpf_err_ptr(err);
b2650027
AN
12126}
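/* Usage sketch for the "!/" archive syntax handled above; the APK path,
 * ELF member, and JNI symbol are assumptions. The part before "!/" is
 * opened as an uncompressed zip archive, the part after it names the ELF
 * file inside the archive.
 */
static struct bpf_link *attach_in_apk(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.func_name = "Java_com_example_Native_process",
	);

	return bpf_program__attach_uprobe_opts(prog, -1,
			"/data/app/com.example/base.apk!/lib/arm64-v8a/libnative.so",
			0 /* func_offset */, &opts);
}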
12127
39f8dc43
AM
12128/* Format of u[ret]probe section definition supporting auto-attach:
12129 * u[ret]probe/binary:function[+offset]
12130 *
12131 * binary can be an absolute/relative path or a filename; the latter is resolved to a
12132 * full binary path via bpf_program__attach_uprobe_opts.
12133 *
12134 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
12135 * specified (and auto-attach is not possible) or the above format is specified for
12136 * auto-attach.
12137 */
12138static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12139{
12140 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
2147c8d0
HC
12141 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12142 int n, c, ret = -EINVAL;
90db26e6 12143 long offset = 0;
39f8dc43
AM
12144
12145 *link = NULL;
12146
2147c8d0
HC
12147 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12148 &probe_type, &binary_path, &func_name);
90db26e6
AM
12149 switch (n) {
12150 case 1:
12151 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12152 ret = 0;
12153 break;
12154 case 2:
12155 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12156 prog->name, prog->sec_name);
12157 break;
12158 case 3:
2147c8d0
HC
12159 /* check if user specifies `+offset`, if yes, this should be
12160 * the last part of the string, make sure sscanf read to EOL
12161 */
12162 func_off = strrchr(func_name, '+');
12163 if (func_off) {
12164 n = sscanf(func_off, "+%li%n", &offset, &c);
12165 if (n == 1 && *(func_off + c) == '\0')
12166 func_off[0] = '\0';
12167 else
12168 offset = 0;
12169 }
c4cac71f
DK
12170 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12171 strcmp(probe_type, "uretprobe.s") == 0;
90db26e6
AM
12172 if (opts.retprobe && offset != 0) {
12173 pr_warn("prog '%s': uretprobes do not support offset specification\n",
12174 prog->name);
12175 break;
12176 }
12177 opts.func_name = func_name;
12178 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12179 ret = libbpf_get_error(*link);
12180 break;
12181 default:
12182 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
39f8dc43 12183 prog->sec_name);
90db26e6 12184 break;
39f8dc43 12185 }
90db26e6
AM
12186 free(probe_type);
12187 free(binary_path);
12188 free(func_name);
39f8dc43 12189
90db26e6 12190 return ret;
39f8dc43
AM
12191}
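/* BPF-side sketch of section names this parser accepts (library and
 * function names are assumptions):
 *
 *   SEC("uprobe/libc.so.6:malloc")          - resolved via library search paths
 *   SEC("uprobe//usr/bin/myapp:main+0x10")  - absolute path plus offset
 *   SEC("uretprobe/libc.so.6:free")         - return probe; offsets rejected
 */
SEC("uprobe/libc.so.6:malloc")
int trace_malloc_entry(struct pt_regs *ctx)
{
	return 0;
}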
12192
942025c9 12193struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
47faff37
AN
12194 bool retprobe, pid_t pid,
12195 const char *binary_path,
12196 size_t func_offset)
12197{
12198 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12199
12200 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12201}
12202
2e4913e0
AN
12203struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12204 pid_t pid, const char *binary_path,
12205 const char *usdt_provider, const char *usdt_name,
12206 const struct bpf_usdt_opts *opts)
12207{
12208 char resolved_path[512];
12209 struct bpf_object *obj = prog->obj;
12210 struct bpf_link *link;
5af25a41 12211 __u64 usdt_cookie;
2e4913e0
AN
12212 int err;
12213
12214 if (!OPTS_VALID(opts, bpf_uprobe_opts))
12215 return libbpf_err_ptr(-EINVAL);
12216
12217 if (bpf_program__fd(prog) < 0) {
7b30c296 12218 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
2e4913e0
AN
12219 prog->name);
12220 return libbpf_err_ptr(-EINVAL);
12221 }
12222
8ed2f5a6
HC
12223 if (!binary_path)
12224 return libbpf_err_ptr(-EINVAL);
12225
2e4913e0
AN
12226 if (!strchr(binary_path, '/')) {
12227 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12228 if (err) {
12229 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12230 prog->name, binary_path, err);
12231 return libbpf_err_ptr(err);
12232 }
12233 binary_path = resolved_path;
12234 }
12235
12236 /* USDT manager is instantiated lazily on first USDT attach. It will
12237 * be destroyed together with BPF object in bpf_object__close().
12238 */
12239 if (IS_ERR(obj->usdt_man))
12240 return libbpf_ptr(obj->usdt_man);
12241 if (!obj->usdt_man) {
12242 obj->usdt_man = usdt_manager_new(obj);
12243 if (IS_ERR(obj->usdt_man))
12244 return libbpf_ptr(obj->usdt_man);
12245 }
12246
12247 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12248 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
e3ba8e4e 12249 usdt_provider, usdt_name, usdt_cookie);
2e4913e0
AN
12250 err = libbpf_get_error(link);
12251 if (err)
12252 return libbpf_err_ptr(err);
12253 return link;
12254}
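/* Usage sketch; the binary, provider, and probe name are assumptions. The
 * cookie can be read on the BPF side via bpf_usdt_cookie() from usdt.bpf.h.
 */
static struct bpf_link *attach_usdt_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0x1234);

	return bpf_program__attach_usdt(prog, -1 /* any process */,
					"/usr/sbin/mysqld", "mysql",
					"query__start", &opts);
}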
12255
12256static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12257{
12258 char *path = NULL, *provider = NULL, *name = NULL;
12259 const char *sec_name;
12260 int n, err;
12261
12262 sec_name = bpf_program__section_name(prog);
12263 if (strcmp(sec_name, "usdt") == 0) {
12264 /* no auto-attach for just SEC("usdt") */
12265 *link = NULL;
12266 return 0;
12267 }
12268
12269 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12270 if (n != 3) {
12271 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12272 sec_name);
12273 err = -EINVAL;
12274 } else {
12275 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12276 provider, name, NULL);
12277 err = libbpf_get_error(*link);
12278 }
12279 free(path);
12280 free(provider);
12281 free(name);
12282 return err;
12283}
12284
f6de59c1
AN
12285static int determine_tracepoint_id(const char *tp_category,
12286 const char *tp_name)
12287{
12288 char file[PATH_MAX];
12289 int ret;
12290
a1ac9fd6
AN
12291 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12292 tracefs_path(), tp_category, tp_name);
f6de59c1
AN
12293 if (ret < 0)
12294 return -errno;
12295 if (ret >= sizeof(file)) {
12296 pr_debug("tracepoint %s/%s path is too long\n",
12297 tp_category, tp_name);
12298 return -E2BIG;
12299 }
12300 return parse_uint_from_file(file, "%d\n");
12301}
12302
12303static int perf_event_open_tracepoint(const char *tp_category,
12304 const char *tp_name)
12305{
813847a3
AN
12306 const size_t attr_sz = sizeof(struct perf_event_attr);
12307 struct perf_event_attr attr;
f6de59c1
AN
12308 char errmsg[STRERR_BUFSIZE];
12309 int tp_id, pfd, err;
12310
12311 tp_id = determine_tracepoint_id(tp_category, tp_name);
12312 if (tp_id < 0) {
be18010e
KW
12313 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12314 tp_category, tp_name,
12315 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
f6de59c1
AN
12316 return tp_id;
12317 }
12318
813847a3 12319 memset(&attr, 0, attr_sz);
f6de59c1 12320 attr.type = PERF_TYPE_TRACEPOINT;
813847a3 12321 attr.size = attr_sz;
f6de59c1
AN
12322 attr.config = tp_id;
12323
12324 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12325 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12326 if (pfd < 0) {
12327 err = -errno;
be18010e
KW
12328 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12329 tp_category, tp_name,
12330 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
f6de59c1
AN
12331 return err;
12332 }
12333 return pfd;
12334}
12335
942025c9 12336struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
47faff37
AN
12337 const char *tp_category,
12338 const char *tp_name,
12339 const struct bpf_tracepoint_opts *opts)
f6de59c1 12340{
47faff37 12341 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f6de59c1
AN
12342 char errmsg[STRERR_BUFSIZE];
12343 struct bpf_link *link;
12344 int pfd, err;
12345
47faff37
AN
12346 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12347 return libbpf_err_ptr(-EINVAL);
12348
12349 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12350
f6de59c1
AN
12351 pfd = perf_event_open_tracepoint(tp_category, tp_name);
12352 if (pfd < 0) {
52109584
AN
12353 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12354 prog->name, tp_category, tp_name,
be18010e 12355 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12356 return libbpf_err_ptr(pfd);
f6de59c1 12357 }
47faff37 12358 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
12359 err = libbpf_get_error(link);
12360 if (err) {
f6de59c1 12361 close(pfd);
52109584
AN
12362 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12363 prog->name, tp_category, tp_name,
be18010e 12364 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
e9fc3ce9 12365 return libbpf_err_ptr(err);
f6de59c1
AN
12366 }
12367 return link;
12368}
12369
942025c9 12370struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
47faff37
AN
12371 const char *tp_category,
12372 const char *tp_name)
12373{
12374 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
12375}
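/* Usage sketch: category and name correspond to
 * <tracefs>/events/<category>/<name>, as built in determine_tracepoint_id().
 */
static struct bpf_link *attach_tp_example(struct bpf_program *prog)
{
	return bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_write");
}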
12376
4fa5bcfe 12377static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7
AN
12378{
12379 char *sec_name, *tp_cat, *tp_name;
d7a18ea7 12380
9af8efc4
AN
12381 *link = NULL;
12382
12383 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12384 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12385 return 0;
12386
ac6a6586
AN
12387 sec_name = strdup(prog->sec_name);
12388 if (!sec_name)
12389 return -ENOMEM;
12390
13d35a0c
AN
12391 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12392 if (str_has_pfx(prog->sec_name, "tp/"))
12393 tp_cat = sec_name + sizeof("tp/") - 1;
12394 else
12395 tp_cat = sec_name + sizeof("tracepoint/") - 1;
d7a18ea7
AN
12396 tp_name = strchr(tp_cat, '/');
12397 if (!tp_name) {
e9fc3ce9 12398 free(sec_name);
4fa5bcfe 12399 return -EINVAL;
d7a18ea7
AN
12400 }
12401 *tp_name = '\0';
12402 tp_name++;
12403
4fa5bcfe 12404 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
d7a18ea7 12405 free(sec_name);
4fa5bcfe 12406 return libbpf_get_error(*link);
d7a18ea7
AN
12407}
12408
36ffb202
AN
12409struct bpf_link *
12410bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
12411 const char *tp_name,
12412 struct bpf_raw_tracepoint_opts *opts)
84bf5e1f 12413{
36ffb202 12414 LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
84bf5e1f 12415 char errmsg[STRERR_BUFSIZE];
c016b68e 12416 struct bpf_link *link;
84bf5e1f
AN
12417 int prog_fd, pfd;
12418
36ffb202
AN
12419 if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
12420 return libbpf_err_ptr(-EINVAL);
12421
84bf5e1f
AN
12422 prog_fd = bpf_program__fd(prog);
12423 if (prog_fd < 0) {
52109584 12424 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12425 return libbpf_err_ptr(-EINVAL);
84bf5e1f
AN
12426 }
12427
d6958706 12428 link = calloc(1, sizeof(*link));
84bf5e1f 12429 if (!link)
e9fc3ce9 12430 return libbpf_err_ptr(-ENOMEM);
c016b68e 12431 link->detach = &bpf_link__detach_fd;
84bf5e1f 12432
36ffb202
AN
12433 raw_opts.tp_name = tp_name;
12434 raw_opts.cookie = OPTS_GET(opts, cookie, 0);
12435 pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
84bf5e1f
AN
12436 if (pfd < 0) {
12437 pfd = -errno;
12438 free(link);
52109584
AN
12439 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12440 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12441 return libbpf_err_ptr(pfd);
84bf5e1f
AN
12442 }
12443 link->fd = pfd;
c016b68e 12444 return link;
84bf5e1f
AN
12445}
12446
36ffb202
AN
12447struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12448 const char *tp_name)
12449{
12450 return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
12451}
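/* Usage sketch: raw tracepoint attach with a BPF cookie (the tracepoint
 * name and cookie value are assumptions).
 */
static struct bpf_link *attach_raw_tp_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_raw_tracepoint_opts, opts, .cookie = 0xcafe);

	return bpf_program__attach_raw_tracepoint_opts(prog, "sched_switch",
						       &opts);
}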
12452
4fa5bcfe 12453static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 12454{
ccaf12d6 12455 static const char *const prefixes[] = {
9af8efc4
AN
12456 "raw_tp",
12457 "raw_tracepoint",
12458 "raw_tp.w",
12459 "raw_tracepoint.w",
ccaf12d6
HT
12460 };
12461 size_t i;
12462 const char *tp_name = NULL;
13d35a0c 12463
9af8efc4
AN
12464 *link = NULL;
12465
ccaf12d6 12466 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
9af8efc4
AN
12467 size_t pfx_len;
12468
12469 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12470 continue;
12471
12472 pfx_len = strlen(prefixes[i]);
12473 /* no auto-attach for, e.g., plain SEC("raw_tp") without a name */
12474 if (prog->sec_name[pfx_len] == '\0')
12475 return 0;
12476
12477 if (prog->sec_name[pfx_len] != '/')
12478 continue;
12479
12480 tp_name = prog->sec_name + pfx_len + 1;
12481 break;
ccaf12d6 12482 }
9af8efc4 12483
ccaf12d6
HT
12484 if (!tp_name) {
12485 pr_warn("prog '%s': invalid section name '%s'\n",
12486 prog->name, prog->sec_name);
4fa5bcfe 12487 return -EINVAL;
ccaf12d6 12488 }
d7a18ea7 12489
4fa5bcfe 12490 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
5fd2a60a 12491 return libbpf_get_error(*link);
d7a18ea7
AN
12492}
12493
1e092a03 12494/* Common logic for all BPF program types that attach to a btf_id */
129b9c5e
KFL
12495static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12496 const struct bpf_trace_opts *opts)
b8c54ea4 12497{
129b9c5e 12498 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
b8c54ea4 12499 char errmsg[STRERR_BUFSIZE];
c016b68e 12500 struct bpf_link *link;
b8c54ea4
AS
12501 int prog_fd, pfd;
12502
129b9c5e
KFL
12503 if (!OPTS_VALID(opts, bpf_trace_opts))
12504 return libbpf_err_ptr(-EINVAL);
12505
b8c54ea4
AS
12506 prog_fd = bpf_program__fd(prog);
12507 if (prog_fd < 0) {
52109584 12508 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12509 return libbpf_err_ptr(-EINVAL);
b8c54ea4
AS
12510 }
12511
d6958706 12512 link = calloc(1, sizeof(*link));
b8c54ea4 12513 if (!link)
e9fc3ce9 12514 return libbpf_err_ptr(-ENOMEM);
c016b68e 12515 link->detach = &bpf_link__detach_fd;
b8c54ea4 12516
8462e0b4 12517 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
129b9c5e
KFL
12518 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12519 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
b8c54ea4
AS
12520 if (pfd < 0) {
12521 pfd = -errno;
12522 free(link);
52109584
AN
12523 pr_warn("prog '%s': failed to attach: %s\n",
12524 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12525 return libbpf_err_ptr(pfd);
b8c54ea4
AS
12526 }
12527 link->fd = pfd;
003fed59 12528 return link;
b8c54ea4
AS
12529}
12530
942025c9 12531struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
1e092a03 12532{
129b9c5e
KFL
12533 return bpf_program__attach_btf_id(prog, NULL);
12534}
12535
12536struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12537 const struct bpf_trace_opts *opts)
12538{
12539 return bpf_program__attach_btf_id(prog, opts);
1e092a03
KS
12540}
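/* Usage sketch: fentry/fexit-style attach with a cookie, readable on the
 * BPF side via bpf_get_attach_cookie() (the value is an assumption).
 */
static struct bpf_link *attach_trace_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_trace_opts, opts, .cookie = 42);

	return bpf_program__attach_trace_opts(prog, &opts);
}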
12541
942025c9 12542struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
1e092a03 12543{
129b9c5e 12544 return bpf_program__attach_btf_id(prog, NULL);
1e092a03
KS
12545}
12546
4fa5bcfe 12547static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 12548{
4fa5bcfe
AN
12549 *link = bpf_program__attach_trace(prog);
12550 return libbpf_get_error(*link);
d7a18ea7
AN
12551}
12552
4fa5bcfe 12553static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
1e092a03 12554{
4fa5bcfe
AN
12555 *link = bpf_program__attach_lsm(prog);
12556 return libbpf_get_error(*link);
1e092a03
KS
12557}
12558
d60d81ac 12559static struct bpf_link *
55cc3768
DB
12560bpf_program_attach_fd(const struct bpf_program *prog,
12561 int target_fd, const char *target_name,
12562 const struct bpf_link_create_opts *opts)
cc4f864b 12563{
cc4f864b
AN
12564 enum bpf_attach_type attach_type;
12565 char errmsg[STRERR_BUFSIZE];
12566 struct bpf_link *link;
12567 int prog_fd, link_fd;
12568
12569 prog_fd = bpf_program__fd(prog);
12570 if (prog_fd < 0) {
52109584 12571 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12572 return libbpf_err_ptr(-EINVAL);
cc4f864b
AN
12573 }
12574
12575 link = calloc(1, sizeof(*link));
12576 if (!link)
e9fc3ce9 12577 return libbpf_err_ptr(-ENOMEM);
cc4f864b
AN
12578 link->detach = &bpf_link__detach_fd;
12579
20eccf29 12580 attach_type = bpf_program__expected_attach_type(prog);
55cc3768 12581 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
cc4f864b
AN
12582 if (link_fd < 0) {
12583 link_fd = -errno;
12584 free(link);
52109584
AN
12585 pr_warn("prog '%s': failed to attach to %s: %s\n",
12586 prog->name, target_name,
cc4f864b 12587 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
e9fc3ce9 12588 return libbpf_err_ptr(link_fd);
cc4f864b
AN
12589 }
12590 link->fd = link_fd;
12591 return link;
12592}
12593
d60d81ac 12594struct bpf_link *
942025c9 12595bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
d60d81ac 12596{
55cc3768 12597 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
d60d81ac
JS
12598}
12599
12600struct bpf_link *
942025c9 12601bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
d60d81ac 12602{
55cc3768 12603 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
d60d81ac
JS
12604}
12605
849989af
YS
12606struct bpf_link *
12607bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd)
12608{
12609 return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL);
12610}
12611
942025c9 12612struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
dc8698ca
AN
12613{
12614 /* target_fd/target_ifindex use the same field in LINK_CREATE */
55cc3768
DB
12615 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12616}
12617
12618struct bpf_link *
12619bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12620 const struct bpf_tcx_opts *opts)
12621{
12622 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12623 __u32 relative_id;
12624 int relative_fd;
12625
12626 if (!OPTS_VALID(opts, bpf_tcx_opts))
12627 return libbpf_err_ptr(-EINVAL);
12628
12629 relative_id = OPTS_GET(opts, relative_id, 0);
12630 relative_fd = OPTS_GET(opts, relative_fd, 0);
12631
12632 /* validate we don't have unexpected combinations of non-zero fields */
12633 if (!ifindex) {
12634 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12635 prog->name);
12636 return libbpf_err_ptr(-EINVAL);
12637 }
12638 if (relative_fd && relative_id) {
12639 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12640 prog->name);
12641 return libbpf_err_ptr(-EINVAL);
12642 }
12643
12644 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12645 link_create_opts.tcx.relative_fd = relative_fd;
12646 link_create_opts.tcx.relative_id = relative_id;
12647 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12648
12649 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12650 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
a5359091
THJ
12651}
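/* Usage sketch: append this program after an already-attached tcx program
 * (the ifindex and anchor FD are assumptions; BPF_F_AFTER comes from the
 * uapi bpf.h).
 */
static struct bpf_link *attach_tcx_example(struct bpf_program *prog,
					   int ifindex, int anchor_prog_fd)
{
	LIBBPF_OPTS(bpf_tcx_opts, opts,
		.flags = BPF_F_AFTER,
		.relative_fd = anchor_prog_fd,
	);

	return bpf_program__attach_tcx(prog, ifindex, &opts);
}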
12652
05c31b4a
DB
12653struct bpf_link *
12654bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12655 const struct bpf_netkit_opts *opts)
12656{
12657 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12658 __u32 relative_id;
12659 int relative_fd;
12660
12661 if (!OPTS_VALID(opts, bpf_netkit_opts))
12662 return libbpf_err_ptr(-EINVAL);
12663
12664 relative_id = OPTS_GET(opts, relative_id, 0);
12665 relative_fd = OPTS_GET(opts, relative_fd, 0);
12666
12667 /* validate we don't have unexpected combinations of non-zero fields */
12668 if (!ifindex) {
12669 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12670 prog->name);
12671 return libbpf_err_ptr(-EINVAL);
12672 }
12673 if (relative_fd && relative_id) {
12674 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12675 prog->name);
12676 return libbpf_err_ptr(-EINVAL);
12677 }
12678
12679 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12680 link_create_opts.netkit.relative_fd = relative_fd;
12681 link_create_opts.netkit.relative_id = relative_id;
12682 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12683
12684 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12685}
12686
942025c9 12687struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
a5359091
THJ
12688 int target_fd,
12689 const char *attach_func_name)
12690{
12691 int btf_id;
12692
12693 if (!!target_fd != !!attach_func_name) {
12694 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12695 prog->name);
e9fc3ce9 12696 return libbpf_err_ptr(-EINVAL);
a5359091
THJ
12697 }
12698
12699 if (prog->type != BPF_PROG_TYPE_EXT) {
12700 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
12701 prog->name);
e9fc3ce9 12702 return libbpf_err_ptr(-EINVAL);
a5359091
THJ
12703 }
12704
12705 if (target_fd) {
55cc3768
DB
12706 LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12707
a5359091
THJ
12708 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12709 if (btf_id < 0)
e9fc3ce9 12710 return libbpf_err_ptr(btf_id);
a5359091 12711
55cc3768
DB
12712 target_opts.target_btf_id = btf_id;
12713
12714 return bpf_program_attach_fd(prog, target_fd, "freplace",
12715 &target_opts);
a5359091
THJ
12716 } else {
12717 /* no target, so use raw_tracepoint_open for compatibility
12718 * with old kernels
12719 */
12720 return bpf_program__attach_trace(prog);
12721 }
dc8698ca
AN
12722}
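/* Usage sketch: replace a global function inside an already-loaded target
 * program (the target FD and function name are assumptions); prog must be
 * BPF_PROG_TYPE_EXT, as checked above.
 */
static struct bpf_link *freplace_example(struct bpf_program *ext_prog,
					 int target_prog_fd)
{
	return bpf_program__attach_freplace(ext_prog, target_prog_fd,
					    "xdp_process_packet");
}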
12723
c09add2f 12724struct bpf_link *
942025c9 12725bpf_program__attach_iter(const struct bpf_program *prog,
c09add2f
YS
12726 const struct bpf_iter_attach_opts *opts)
12727{
cd31039a 12728 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
c09add2f
YS
12729 char errmsg[STRERR_BUFSIZE];
12730 struct bpf_link *link;
12731 int prog_fd, link_fd;
cd31039a 12732 __u32 target_fd = 0;
c09add2f
YS
12733
12734 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
e9fc3ce9 12735 return libbpf_err_ptr(-EINVAL);
c09add2f 12736
74fc097d
YS
12737 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12738 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
cd31039a 12739
c09add2f
YS
12740 prog_fd = bpf_program__fd(prog);
12741 if (prog_fd < 0) {
52109584 12742 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12743 return libbpf_err_ptr(-EINVAL);
c09add2f
YS
12744 }
12745
12746 link = calloc(1, sizeof(*link));
12747 if (!link)
e9fc3ce9 12748 return libbpf_err_ptr(-ENOMEM);
c09add2f
YS
12749 link->detach = &bpf_link__detach_fd;
12750
cd31039a
YS
12751 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12752 &link_create_opts);
c09add2f
YS
12753 if (link_fd < 0) {
12754 link_fd = -errno;
12755 free(link);
52109584
AN
12756 pr_warn("prog '%s': failed to attach to iterator: %s\n",
12757 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
e9fc3ce9 12758 return libbpf_err_ptr(link_fd);
c09add2f
YS
12759 }
12760 link->fd = link_fd;
12761 return link;
12762}
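/* Usage sketch: parameterize a map-element iterator through
 * union bpf_iter_link_info (the map FD is an assumption).
 */
static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
{
	union bpf_iter_link_info linfo;
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	return bpf_program__attach_iter(prog, &opts);
}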
12763
4fa5bcfe 12764static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
e9fc3ce9 12765{
4fa5bcfe
AN
12766 *link = bpf_program__attach_iter(prog, NULL);
12767 return libbpf_get_error(*link);
e9fc3ce9
AN
12768}
12769
52364abb
FW
12770struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12771 const struct bpf_netfilter_opts *opts)
12772{
12773 LIBBPF_OPTS(bpf_link_create_opts, lopts);
12774 struct bpf_link *link;
12775 int prog_fd, link_fd;
12776
12777 if (!OPTS_VALID(opts, bpf_netfilter_opts))
12778 return libbpf_err_ptr(-EINVAL);
12779
12780 prog_fd = bpf_program__fd(prog);
12781 if (prog_fd < 0) {
12782 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12783 return libbpf_err_ptr(-EINVAL);
12784 }
12785
12786 link = calloc(1, sizeof(*link));
12787 if (!link)
12788 return libbpf_err_ptr(-ENOMEM);
12789
12790 link->detach = &bpf_link__detach_fd;
12791
12792 lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12793 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12794 lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12795 lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12796
12797 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12798 if (link_fd < 0) {
12799 char errmsg[STRERR_BUFSIZE];
12800
12801 link_fd = -errno;
12802 free(link);
12803 pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12804 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12805 return libbpf_err_ptr(link_fd);
12806 }
12807 link->fd = link_fd;
12808
12809 return link;
12810}
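/* Usage sketch: IPv4 local-in hook (NFPROTO_IPV4 and NF_INET_LOCAL_IN come
 * from linux/netfilter.h; the priority value is an assumption).
 */
static struct bpf_link *attach_nf_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = -128,
	);

	return bpf_program__attach_netfilter(prog, &opts);
}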
12811
942025c9 12812struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
d7a18ea7 12813{
4fa5bcfe
AN
12814 struct bpf_link *link = NULL;
12815 int err;
12816
12817 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12818 return libbpf_err_ptr(-EOPNOTSUPP);
d7a18ea7 12819
7b30c296
MY
12820 if (bpf_program__fd(prog) < 0) {
12821 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12822 prog->name);
12823 return libbpf_err_ptr(-EINVAL);
12824 }
12825
4fa5bcfe
AN
12826 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12827 if (err)
12828 return libbpf_err_ptr(err);
12829
12830 /* When calling bpf_program__attach() explicitly, auto-attach support
12831 * is expected to work, so NULL returned link is considered an error.
12832 * This is different for skeleton's attach, see comment in
12833 * bpf_object__attach_skeleton().
12834 */
12835 if (!link)
12836 return libbpf_err_ptr(-EOPNOTSUPP);
12837
12838 return link;
d7a18ea7
AN
12839}
12840
8d1608d7
KFL
12841struct bpf_link_struct_ops {
12842 struct bpf_link link;
12843 int map_fd;
12844};
12845
590a0088
MKL
12846static int bpf_link__detach_struct_ops(struct bpf_link *link)
12847{
8d1608d7 12848 struct bpf_link_struct_ops *st_link;
590a0088
MKL
12849 __u32 zero = 0;
12850
8d1608d7 12851 st_link = container_of(link, struct bpf_link_struct_ops, link);
590a0088 12852
8d1608d7
KFL
12853 if (st_link->map_fd < 0)
12854 /* w/o a real link */
12855 return bpf_map_delete_elem(link->fd, &zero);
12856
12857 return close(link->fd);
590a0088
MKL
12858}
12859
942025c9 12860struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
590a0088 12861{
8d1608d7
KFL
12862 struct bpf_link_struct_ops *link;
12863 __u32 zero = 0;
12864 int err, fd;
590a0088 12865
7b30c296 12866 if (!bpf_map__is_struct_ops(map))
e9fc3ce9 12867 return libbpf_err_ptr(-EINVAL);
590a0088 12868
7b30c296
MY
12869 if (map->fd < 0) {
12870 pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
12871 return libbpf_err_ptr(-EINVAL);
12872 }
12873
590a0088
MKL
12874 link = calloc(1, sizeof(*link));
12875 if (!link)
e9fc3ce9 12876 return libbpf_err_ptr(-ENOMEM);
590a0088 12877
8d1608d7
KFL
12878 /* kern_vdata should be prepared during the loading phase. */
12879 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12880 /* It can be EBUSY if the map has been used to create or
12881 * update a link before. We don't allow updating the value of
12882 * a struct_ops once it is set, which ensures that the value
12883 * never changes. So it is safe to ignore EBUSY.
12884 */
12885 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12886 free(link);
12887 return libbpf_err_ptr(err);
12888 }
590a0088 12889
8d1608d7 12890 link->link.detach = bpf_link__detach_struct_ops;
590a0088 12891
8d1608d7
KFL
12892 if (!(map->def.map_flags & BPF_F_LINK)) {
12893 /* w/o a real link */
12894 link->link.fd = map->fd;
12895 link->map_fd = -1;
12896 return &link->link;
590a0088
MKL
12897 }
12898
8d1608d7
KFL
12899 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12900 if (fd < 0) {
590a0088 12901 free(link);
8d1608d7 12902 return libbpf_err_ptr(fd);
590a0088
MKL
12903 }
12904
8d1608d7
KFL
12905 link->link.fd = fd;
12906 link->map_fd = map->fd;
590a0088 12907
8d1608d7 12908 return &link->link;
590a0088
MKL
12909}
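/* Usage sketch (the map name is an assumption): with BPF_F_LINK the
 * returned link wraps a real kernel link; otherwise detaching deletes the
 * map element, as implemented in bpf_link__detach_struct_ops() above.
 */
static struct bpf_link *attach_ops_example(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_ops");

	return map ? bpf_map__attach_struct_ops(map) : NULL;
}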
12910
912dd4b0
KFL
12911/*
12912 * Swap the backing struct_ops map of a link with a new struct_ops map.
12913 */
12914int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12915{
12916 struct bpf_link_struct_ops *st_ops_link;
12917 __u32 zero = 0;
12918 int err;
12919
7b30c296
MY
12920 if (!bpf_map__is_struct_ops(map))
12921 return -EINVAL;
12922
12923 if (map->fd < 0) {
12924 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
912dd4b0 12925 return -EINVAL;
7b30c296 12926 }
912dd4b0
KFL
12927
12928 st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12929 /* Ensure the type of a link is correct */
12930 if (st_ops_link->map_fd < 0)
12931 return -EINVAL;
12932
12933 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12934 /* It can be EBUSY if the map has been used to create or
12935 * update a link before. We don't allow updating the value of
12936 * a struct_ops once it is set, which ensures that the value
12937 * never changes. So it is safe to ignore EBUSY.
12938 */
12939 if (err && err != -EBUSY)
12940 return err;
12941
12942 err = bpf_link_update(link->fd, map->fd, NULL);
12943 if (err < 0)
12944 return err;
12945
12946 st_ops_link->map_fd = map->fd;
12947
12948 return 0;
12949}
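/* Usage sketch: atomically redirect a BPF_F_LINK struct_ops link to a
 * replacement map (the map name is an assumption).
 */
static int swap_ops_example(struct bpf_link *link, struct bpf_object *obj)
{
	struct bpf_map *new_map = bpf_object__find_map_by_name(obj, "my_ops_v2");

	return new_map ? bpf_link__update_map(link, new_map) : -ENOENT;
}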
12950
22dd7a58
AN
12951typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12952 void *private_data);
12953
7218c28c
CL
12954static enum bpf_perf_event_ret
12955perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12956 void **copy_mem, size_t *copy_size,
12957 bpf_perf_event_print_t fn, void *private_data)
d0cabbb0 12958{
3dca2115 12959 struct perf_event_mmap_page *header = mmap_mem;
a64af0ef 12960 __u64 data_head = ring_buffer_read_head(header);
d0cabbb0 12961 __u64 data_tail = header->data_tail;
3dca2115
DB
12962 void *base = ((__u8 *)header) + page_size;
12963 int ret = LIBBPF_PERF_EVENT_CONT;
12964 struct perf_event_header *ehdr;
12965 size_t ehdr_size;
12966
12967 while (data_head != data_tail) {
12968 ehdr = base + (data_tail & (mmap_size - 1));
12969 ehdr_size = ehdr->size;
12970
12971 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12972 void *copy_start = ehdr;
12973 size_t len_first = base + mmap_size - copy_start;
12974 size_t len_secnd = ehdr_size - len_first;
12975
12976 if (*copy_size < ehdr_size) {
12977 free(*copy_mem);
12978 *copy_mem = malloc(ehdr_size);
12979 if (!*copy_mem) {
12980 *copy_size = 0;
d0cabbb0
JK
12981 ret = LIBBPF_PERF_EVENT_ERROR;
12982 break;
12983 }
3dca2115 12984 *copy_size = ehdr_size;
d0cabbb0
JK
12985 }
12986
3dca2115
DB
12987 memcpy(*copy_mem, copy_start, len_first);
12988 memcpy(*copy_mem + len_first, base, len_secnd);
12989 ehdr = *copy_mem;
d0cabbb0
JK
12990 }
12991
3dca2115
DB
12992 ret = fn(ehdr, private_data);
12993 data_tail += ehdr_size;
d0cabbb0
JK
12994 if (ret != LIBBPF_PERF_EVENT_CONT)
12995 break;
d0cabbb0
JK
12996 }
12997
a64af0ef 12998 ring_buffer_write_tail(header, data_tail);
e9fc3ce9 12999 return libbpf_err(ret);
d0cabbb0 13000}
34be1646 13001
fb84b822
AN
13002struct perf_buffer;
13003
13004struct perf_buffer_params {
13005 struct perf_event_attr *attr;
13006 /* if event_cb is specified, it takes precedence */
13007 perf_buffer_event_fn event_cb;
13008 /* sample_cb and lost_cb are higher-level common-case callbacks */
13009 perf_buffer_sample_fn sample_cb;
13010 perf_buffer_lost_fn lost_cb;
13011 void *ctx;
13012 int cpu_cnt;
13013 int *cpus;
13014 int *map_keys;
13015};
13016
13017struct perf_cpu_buf {
13018 struct perf_buffer *pb;
13019 void *base; /* mmap()'ed memory */
13020 void *buf; /* for reconstructing segmented data */
13021 size_t buf_size;
13022 int fd;
13023 int cpu;
13024 int map_key;
13025};
13026
13027struct perf_buffer {
13028 perf_buffer_event_fn event_cb;
13029 perf_buffer_sample_fn sample_cb;
13030 perf_buffer_lost_fn lost_cb;
13031 void *ctx; /* passed into callbacks */
13032
13033 size_t page_size;
13034 size_t mmap_size;
13035 struct perf_cpu_buf **cpu_bufs;
13036 struct epoll_event *events;
783b8f01 13037 int cpu_cnt; /* number of allocated CPU buffers */
fb84b822
AN
13038 int epoll_fd; /* epoll instance FD */
13039 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
13040};
13041
13042static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
13043 struct perf_cpu_buf *cpu_buf)
13044{
13045 if (!cpu_buf)
13046 return;
13047 if (cpu_buf->base &&
13048 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
be18010e 13049 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
fb84b822
AN
13050 if (cpu_buf->fd >= 0) {
13051 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
13052 close(cpu_buf->fd);
13053 }
13054 free(cpu_buf->buf);
13055 free(cpu_buf);
13056}
13057
13058void perf_buffer__free(struct perf_buffer *pb)
13059{
13060 int i;
13061
50450fc7 13062 if (IS_ERR_OR_NULL(pb))
fb84b822
AN
13063 return;
13064 if (pb->cpu_bufs) {
601b05ca 13065 for (i = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
13066 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13067
601b05ca
EC
13068 if (!cpu_buf)
13069 continue;
13070
fb84b822
AN
13071 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
13072 perf_buffer__free_cpu_buf(pb, cpu_buf);
13073 }
13074 free(pb->cpu_bufs);
13075 }
13076 if (pb->epoll_fd >= 0)
13077 close(pb->epoll_fd);
13078 free(pb->events);
13079 free(pb);
13080}
13081
13082static struct perf_cpu_buf *
13083perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
13084 int cpu, int map_key)
13085{
13086 struct perf_cpu_buf *cpu_buf;
13087 char msg[STRERR_BUFSIZE];
13088 int err;
13089
13090 cpu_buf = calloc(1, sizeof(*cpu_buf));
13091 if (!cpu_buf)
13092 return ERR_PTR(-ENOMEM);
13093
13094 cpu_buf->pb = pb;
13095 cpu_buf->cpu = cpu;
13096 cpu_buf->map_key = map_key;
13097
13098 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
13099 -1, PERF_FLAG_FD_CLOEXEC);
13100 if (cpu_buf->fd < 0) {
13101 err = -errno;
be18010e
KW
13102 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
13103 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13104 goto error;
13105 }
13106
13107 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
13108 PROT_READ | PROT_WRITE, MAP_SHARED,
13109 cpu_buf->fd, 0);
13110 if (cpu_buf->base == MAP_FAILED) {
13111 cpu_buf->base = NULL;
13112 err = -errno;
be18010e
KW
13113 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
13114 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13115 goto error;
13116 }
13117
13118 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
13119 err = -errno;
be18010e
KW
13120 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
13121 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13122 goto error;
13123 }
13124
13125 return cpu_buf;
13126
13127error:
13128 perf_buffer__free_cpu_buf(pb, cpu_buf);
13129 return (struct perf_cpu_buf *)ERR_PTR(err);
13130}
13131
13132static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13133 struct perf_buffer_params *p);
13134
22dd7a58
AN
13135struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
13136 perf_buffer_sample_fn sample_cb,
13137 perf_buffer_lost_fn lost_cb,
13138 void *ctx,
13139 const struct perf_buffer_opts *opts)
fb84b822 13140{
813847a3 13141 const size_t attr_sz = sizeof(struct perf_event_attr);
fb84b822 13142 struct perf_buffer_params p = {};
813847a3 13143 struct perf_event_attr attr;
ab8684b8 13144 __u32 sample_period;
41788934
AN
13145
13146 if (!OPTS_VALID(opts, perf_buffer_opts))
13147 return libbpf_err_ptr(-EINVAL);
4be6e05c 13148
ab8684b8
JD
13149 sample_period = OPTS_GET(opts, sample_period, 1);
13150 if (!sample_period)
13151 sample_period = 1;
13152
813847a3
AN
13153 memset(&attr, 0, attr_sz);
13154 attr.size = attr_sz;
65bb2e0f 13155 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
4be6e05c
ACM
13156 attr.type = PERF_TYPE_SOFTWARE;
13157 attr.sample_type = PERF_SAMPLE_RAW;
ab8684b8
JD
13158 attr.sample_period = sample_period;
13159 attr.wakeup_events = sample_period;
fb84b822
AN
13160
13161 p.attr = &attr;
41788934
AN
13162 p.sample_cb = sample_cb;
13163 p.lost_cb = lost_cb;
13164 p.ctx = ctx;
fb84b822 13165
e9fc3ce9 13166 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
fb84b822
AN
13167}
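/* Usage sketch: per-CPU ring buffers over a PERF_EVENT_ARRAY map (the map
 * name and callback are assumptions; page_cnt must be a power of two, as
 * enforced in __perf_buffer__new() below).
 */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one raw sample emitted via bpf_perf_event_output() */
}

static struct perf_buffer *open_pb_example(struct bpf_object *obj)
{
	int map_fd = bpf_object__find_map_fd_by_name(obj, "events");

	return perf_buffer__new(map_fd, 8 /* pages per CPU */,
				on_sample, NULL /* lost_cb */,
				NULL /* ctx */, NULL /* opts */);
}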
13168
22dd7a58
AN
13169struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
13170 struct perf_event_attr *attr,
13171 perf_buffer_event_fn event_cb, void *ctx,
13172 const struct perf_buffer_raw_opts *opts)
fb84b822
AN
13173{
13174 struct perf_buffer_params p = {};
13175
41332d6e 13176 if (!attr)
41788934
AN
13177 return libbpf_err_ptr(-EINVAL);
13178
13179 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13180 return libbpf_err_ptr(-EINVAL);
13181
13182 p.attr = attr;
13183 p.event_cb = event_cb;
13184 p.ctx = ctx;
13185 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13186 p.cpus = OPTS_GET(opts, cpus, NULL);
13187 p.map_keys = OPTS_GET(opts, map_keys, NULL);
fb84b822 13188
e9fc3ce9 13189 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
fb84b822
AN
13190}
13191
13192static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13193 struct perf_buffer_params *p)
13194{
783b8f01 13195 const char *online_cpus_file = "/sys/devices/system/cpu/online";
0e289487 13196 struct bpf_map_info map;
fb84b822
AN
13197 char msg[STRERR_BUFSIZE];
13198 struct perf_buffer *pb;
783b8f01 13199 bool *online = NULL;
fb84b822 13200 __u32 map_info_len;
783b8f01 13201 int err, i, j, n;
fb84b822 13202
41332d6e 13203 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
be18010e
KW
13204 pr_warn("page count should be power of two, but is %zu\n",
13205 page_cnt);
fb84b822
AN
13206 return ERR_PTR(-EINVAL);
13207 }
13208
0e289487
AN
13209 /* best-effort sanity checks */
13210 memset(&map, 0, sizeof(map));
fb84b822 13211 map_info_len = sizeof(map);
629dfc66 13212 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
fb84b822
AN
13213 if (err) {
13214 err = -errno;
0e289487
AN
13215 /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
13216 * -EBADFD, -EFAULT, or -E2BIG on real error
13217 */
13218 if (err != -EINVAL) {
13219 pr_warn("failed to get map info for map FD %d: %s\n",
13220 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
13221 return ERR_PTR(err);
13222 }
13223 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13224 map_fd);
13225 } else {
13226 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13227 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13228 map.name);
13229 return ERR_PTR(-EINVAL);
13230 }
fb84b822
AN
13231 }
13232
13233 pb = calloc(1, sizeof(*pb));
13234 if (!pb)
13235 return ERR_PTR(-ENOMEM);
13236
13237 pb->event_cb = p->event_cb;
13238 pb->sample_cb = p->sample_cb;
13239 pb->lost_cb = p->lost_cb;
13240 pb->ctx = p->ctx;
13241
13242 pb->page_size = getpagesize();
13243 pb->mmap_size = pb->page_size * page_cnt;
13244 pb->map_fd = map_fd;
13245
13246 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13247 if (pb->epoll_fd < 0) {
13248 err = -errno;
be18010e
KW
13249 pr_warn("failed to create epoll instance: %s\n",
13250 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13251 goto error;
13252 }
13253
13254 if (p->cpu_cnt > 0) {
13255 pb->cpu_cnt = p->cpu_cnt;
13256 } else {
13257 pb->cpu_cnt = libbpf_num_possible_cpus();
13258 if (pb->cpu_cnt < 0) {
13259 err = pb->cpu_cnt;
13260 goto error;
13261 }
0e289487 13262 if (map.max_entries && map.max_entries < pb->cpu_cnt)
fb84b822
AN
13263 pb->cpu_cnt = map.max_entries;
13264 }
13265
13266 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13267 if (!pb->events) {
13268 err = -ENOMEM;
be18010e 13269 pr_warn("failed to allocate events: out of memory\n");
fb84b822
AN
13270 goto error;
13271 }
13272 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13273 if (!pb->cpu_bufs) {
13274 err = -ENOMEM;
be18010e 13275 pr_warn("failed to allocate buffers: out of memory\n");
fb84b822
AN
13276 goto error;
13277 }
13278
783b8f01
AN
13279 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13280 if (err) {
13281 pr_warn("failed to get online CPU mask: %d\n", err);
13282 goto error;
13283 }
13284
13285 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
13286 struct perf_cpu_buf *cpu_buf;
13287 int cpu, map_key;
13288
13289 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13290 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13291
783b8f01
AN
13292 /* in case user didn't explicitly request particular CPUs to
13293 * be attached to, skip offline/not present CPUs
13294 */
13295 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13296 continue;
13297
fb84b822
AN
13298 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13299 if (IS_ERR(cpu_buf)) {
13300 err = PTR_ERR(cpu_buf);
13301 goto error;
13302 }
13303
783b8f01 13304 pb->cpu_bufs[j] = cpu_buf;
fb84b822
AN
13305
13306 err = bpf_map_update_elem(pb->map_fd, &map_key,
13307 &cpu_buf->fd, 0);
13308 if (err) {
13309 err = -errno;
be18010e
KW
13310 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13311 cpu, map_key, cpu_buf->fd,
13312 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13313 goto error;
13314 }
13315
783b8f01
AN
13316 pb->events[j].events = EPOLLIN;
13317 pb->events[j].data.ptr = cpu_buf;
fb84b822 13318 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
783b8f01 13319 &pb->events[j]) < 0) {
fb84b822 13320 err = -errno;
be18010e
KW
13321 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13322 cpu, cpu_buf->fd,
13323 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13324 goto error;
13325 }
783b8f01 13326 j++;
fb84b822 13327 }
783b8f01
AN
13328 pb->cpu_cnt = j;
13329 free(online);
fb84b822
AN
13330
13331 return pb;
13332
13333error:
783b8f01 13334 free(online);
fb84b822
AN
13335 if (pb)
13336 perf_buffer__free(pb);
13337 return ERR_PTR(err);
13338}
13339
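/* Usage sketch (illustrative only, not part of libbpf): constructing a
 * perf_buffer against a BPF_MAP_TYPE_PERF_EVENT_ARRAY map and wiring up
 * sample/lost callbacks. The map name "events" and the surrounding
 * application code are assumptions for the example.
 */
#if 0
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* one raw sample emitted via bpf_perf_event_output() */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu samples on CPU #%d\n",
		(unsigned long long)cnt, cpu);
}

static struct perf_buffer *make_pb(struct bpf_object *obj)
{
	int map_fd = bpf_object__find_map_fd_by_name(obj, "events");

	/* 8 pages of ring buffer per (online) CPU */
	return perf_buffer__new(map_fd, 8, on_sample, on_lost, NULL, NULL);
}
#endif
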
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}
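
/* Sketch of a "raw" callback (assumption: registered via
 * perf_buffer__new_raw()), which takes over the record dispatch done by
 * perf_buffer__process_record() above and therefore sees every record type.
 */
#if 0
static enum bpf_perf_event_ret
raw_event_cb(void *ctx, int cpu, struct perf_event_header *e)
{
	if (e->type == PERF_RECORD_SAMPLE) {
		struct perf_sample_raw *s = (struct perf_sample_raw *)e;

		/* consume s->size bytes starting at s->data */
	}
	/* returning LIBBPF_PERF_EVENT_ERROR would abort processing */
	return LIBBPF_PERF_EVENT_CONT;
}
#endif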

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
				     pb->page_size, &cpu_buf->buf,
				     &cpu_buf->buf_size,
				     perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	if (cnt < 0)
		return -errno;

	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return libbpf_err(err);
		}
	}
	return cnt;
}
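
/* Usage sketch (not part of libbpf): a typical consumer loop around
 * perf_buffer__poll(). The "exiting" flag is an assumed application
 * variable, e.g. set from a SIGINT handler.
 */
#if 0
	while (!exiting) {
		/* >0: number of ready rings; 0: timeout; <0: -errno */
		int n = perf_buffer__poll(pb, 100 /* timeout, ms */);

		if (n < 0 && n != -EINTR) {
			fprintf(stderr, "perf_buffer__poll failed: %d\n", n);
			break;
		}
	}
#endif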

/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return cpu_buf->fd;
}
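
/* Usage sketch (not part of libbpf): driving a single ring from the
 * caller's own event loop via its FD, then draining just that ring with
 * perf_buffer__consume_buffer() (defined further below). Slot 0 is an
 * arbitrary example index; <poll.h> is assumed to be included.
 */
#if 0
	struct pollfd pfd = {
		.fd = perf_buffer__buffer_fd(pb, 0),
		.events = POLLIN,
	};

	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		perf_buffer__consume_buffer(pb, 0);
#endif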

int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	*buf = cpu_buf->base;
	*buf_size = pb->mmap_size;
	return 0;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return libbpf_err(-EINVAL);

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return libbpf_err(-ENOENT);

	return perf_buffer__process_records(pb, cpu_buf);
}

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return libbpf_err(err);
		}
	}
	return 0;
}
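
/* Usage sketch (not part of libbpf): non-blocking drain of all rings,
 * e.g. right before tearing the buffer down, so already-produced samples
 * are not silently dropped.
 */
#if 0
	perf_buffer__consume(pb);
	perf_buffer__free(pb);
#endif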

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0)
		return libbpf_err(-EINVAL);

	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
		prog->attach_prog_fd = attach_prog_fd;
		return 0;
	}

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return libbpf_err(btf_id);
	} else {
		if (!attach_func_name)
			return libbpf_err(-EINVAL);

		/* load btf_vmlinux, if not yet */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return libbpf_err(err);
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return libbpf_err(err);
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
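
/* Usage sketch (not part of libbpf): retargeting a tracing program before
 * load. Program name "handle_connect" and kernel function "tcp_v6_connect"
 * are example assumptions; passing attach_prog_fd == 0 resolves the target
 * against kernel (vmlinux/module) BTF instead of another BPF program.
 */
#if 0
	struct bpf_program *prog;
	int err;

	prog = bpf_object__find_program_by_name(obj, "handle_connect");
	err = bpf_program__set_attach_target(prog, 0, "tcp_v6_connect");
	if (err)
		return err; /* must be called before bpf_object__load() */
#endif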

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}
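
/* Sketch (not part of libbpf): what parse_cpu_mask_str() produces for a
 * typical sysfs-style range string.
 */
#if 0
	bool *mask;
	int n, err;

	err = parse_cpu_mask_str("0-3,6", &mask, &n);
	/* on success: n == 7; mask[0..3] and mask[6] true, mask[4..5] false */
	if (!err)
		free(mask);
#endif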

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}
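
/* Sketch (not part of libbpf): reading the online CPU mask, just as
 * __perf_buffer__new() does above via online_cpus_file.
 */
#if 0
	bool *online;
	int n, err;

	err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online, &n);
	if (!err) {
		/* online[i] is true iff CPU i is currently online */
		free(online);
	}
#endif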

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return libbpf_err(err);

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
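
/* Usage sketch (not part of libbpf): sizing the per-CPU value array for a
 * BPF_MAP_TYPE_PERCPU_ARRAY lookup; map_fd and key are assumed to exist in
 * the surrounding code. Note the kernel rounds per-CPU values up to 8 bytes.
 */
#if 0
	int ncpu = libbpf_num_possible_cpus();
	__u64 *vals = ncpu > 0 ? calloc(ncpu, sizeof(*vals)) : NULL;

	if (vals && !bpf_map_lookup_elem(map_fd, &key, vals)) {
		/* vals[i] holds CPU i's copy of the value */
	}
	free(vals);
#endif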

static int populate_skeleton_maps(const struct bpf_object *obj,
				  struct bpf_map_skeleton *maps,
				  size_t map_cnt)
{
	int i;

	for (i = 0; i < map_cnt; i++) {
		struct bpf_map **map = maps[i].map;
		const char *name = maps[i].name;
		void **mmaped = maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}
	return 0;
}

static int populate_skeleton_progs(const struct bpf_object *obj,
				   struct bpf_prog_skeleton *progs,
				   size_t prog_cnt)
{
	int i;

	for (i = 0; i < prog_cnt; i++) {
		struct bpf_program **prog = progs[i].prog;
		const char *name = progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}
	return 0;
}

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int err;

	/* Attempt to preserve opts->object_name, unless overridden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	err = libbpf_get_error(obj);
	if (err) {
		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
			s->name, err);
		return libbpf_err(err);
	}

	*s->obj = obj;
	err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	return 0;
}

int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
{
	int err, len, var_idx, i;
	const char *var_name;
	const struct bpf_map *map;
	struct btf *btf;
	__u32 map_type_id;
	const struct btf_type *map_type, *var_type;
	const struct bpf_var_skeleton *var_skel;
	struct btf_var_secinfo *var;

	if (!s->obj)
		return libbpf_err(-EINVAL);

	btf = bpf_object__btf(s->obj);
	if (!btf) {
		pr_warn("subskeletons require BTF at runtime (object %s)\n",
			bpf_object__name(s->obj));
		return libbpf_err(-errno);
	}

	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton maps: %d\n", err);
		return libbpf_err(err);
	}

	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
	if (err) {
		pr_warn("failed to populate subskeleton progs: %d\n", err);
		return libbpf_err(err);
	}

	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
		var_skel = &s->vars[var_idx];
		map = *var_skel->map;
		map_type_id = bpf_map__btf_value_type_id(map);
		map_type = btf__type_by_id(btf, map_type_id);

		if (!btf_is_datasec(map_type)) {
			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
				bpf_map__name(map),
				__btf_kind_str(btf_kind(map_type)));
			return libbpf_err(-EINVAL);
		}

		len = btf_vlen(map_type);
		var = btf_var_secinfos(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);
			if (strcmp(var_name, var_skel->name) == 0) {
				*var_skel->addr = map->mmaped + var->offset;
				break;
			}
		}
	}
	return 0;
}

void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
{
	if (!s)
		return;
	free(s->maps);
	free(s->progs);
	free(s->vars);
	free(s);
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = map->fd;
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.type == BPF_MAP_TYPE_ARENA) {
			*mmaped = map->mmaped;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return libbpf_err(err);
		}
	}

	return 0;
}
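
/* Sketch (not part of libbpf): after bpf_object__load_skeleton(), the
 * skeleton's mmaped pointers alias BPF map memory, so userspace writes are
 * immediately visible to the BPF program. The field names below (bss,
 * debug_level) are assumptions about a bpftool-generated skeleton.
 */
#if 0
	skel->bss->debug_level = 2; /* plain store, no bpf_map_update_elem() */
#endif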

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;

		if (!prog->autoload || !prog->autoattach)
			continue;

		/* auto-attaching not supported for this program */
		if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
			continue;

		/* if user already set the link manually, don't attempt auto-attach */
		if (*link)
			continue;

		err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
		if (err) {
			pr_warn("prog '%s': failed to auto-attach: %d\n",
				bpf_program__name(prog), err);
			return libbpf_err(err);
		}

		/* It's possible that for some SEC() definitions auto-attach
		 * is supported in some cases (e.g., if definition completely
		 * specifies target information), but is not in other cases.
		 * SEC("uprobe") is one such case. If user specified target
		 * binary and function name, such BPF program can be
		 * auto-attached. But if not, it shouldn't trigger skeleton's
		 * attach to fail. It should just be skipped.
		 * attach_fn signals such case with returning 0 (no error) and
		 * setting link to NULL.
		 */
	}

	return 0;
}

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (!s)
		return;

	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}
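
/* Usage sketch (not part of libbpf): the full skeleton lifecycle as driven
 * by bpftool-generated wrappers. "myprog" and myprog.skel.h are assumptions
 * for the example; each wrapper delegates to the corresponding
 * bpf_object__*_skeleton() function above.
 */
#if 0
	struct myprog_bpf *skel;
	int err;

	skel = myprog_bpf__open();		/* bpf_object__open_skeleton() */
	if (!skel)
		return -errno;

	err = myprog_bpf__load(skel);		/* bpf_object__load_skeleton() */
	if (!err)
		err = myprog_bpf__attach(skel);	/* bpf_object__attach_skeleton() */

	/* ... run workload ... */

	myprog_bpf__destroy(skel);		/* detach + bpf_object__destroy_skeleton() */
	return err;
#endif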