1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/bpf.h>
23 #include <linux/bpf_lsm.h>
24 #include <linux/skmsg.h>
25 #include <linux/perf_event.h>
26 #include <linux/bsearch.h>
27 #include <linux/kobject.h>
28 #include <linux/sysfs.h>
29 #include <linux/overflow.h>
31 #include <net/netfilter/nf_bpf_link.h>
35 #include "../tools/lib/bpf/relo_core.h"
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
44 * The BTF data is stored under the ".BTF" ELF section
48 * Each 'struct btf_type' object describes a C data type.
49 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. For example,
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
59 * The BTF type section contains a list of 'struct btf_type' objects.
60 * Each one describes a C type. Recall from the above section
61 * that a 'struct btf_type' object could be immediately followed by extra
62 * data in order to describe some particular C types.
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1. The second
 * one has type_id 2...etc. Hence, an earlier btf_type has
 * a smaller type_id.
72 * A btf_type object may refer to another btf_type object by using
73 * type_id (i.e. the "type" in the "struct btf_type").
 * NOTE that we cannot assume any reference order.
76 * A btf_type object can refer to an earlier btf_type object
77 * but it can also refer to a later btf_type object.
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type reference is done
82 * by specifying type_id:
84 * [1] CONST (anon) type_id=2
85 * [2] PTR (anon) type_id=0
87 * The above is the btf_verifier debug log:
 * - Each line starting with "[?]" is a btf_type object
89 * - [?] is the type_id of the btf_type object.
90 * - CONST/PTR is the BTF_KIND_XXX
 * - "(anon)" is the name of the type. It just
 *   happens that CONST and PTR have no name.
93 * - type_id=XXX is the 'u32 type' in btf_type
95 * NOTE: "void" has type_id 0
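 *
 * For illustration, a sketch of how the two types above could be
 * encoded as 'struct btf_type' objects (per uapi/linux/btf.h, the
 * kind lives in bits 24-28 of 'info'):
 *
 *	[1] CONST: .name_off = 0 (anon),
 *	           .info = BTF_KIND_CONST << 24,
 *	           .type = 2	(refers to [2] PTR)
 *	[2] PTR:   .name_off = 0 (anon),
 *	           .info = BTF_KIND_PTR << 24,
 *	           .type = 0	(refers to "void")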
99 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
103 * Each string is '\0' terminated.
105 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type objects may not
 * have a name.
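 *
 * For example, a string section holding the names "int" and "A"
 * could be laid out as (a sketch):
 *
 *	offset:   0    1    2    3    4    5    6
 *	byte:   '\0'  'i'  'n'  't' '\0'  'A' '\0'
 *
 * where name_off 0 means anonymous, name_off 1 refers to "int" and
 * name_off 5 refers to "A".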
112 * To verify BTF data, two passes are needed.
 * The first pass collects all btf_type objects into
117 * an array: "btf->types".
119 * Depending on the C type that a btf_type is describing,
120 * a btf_type may be followed by extra data. We don't know
 * how many btf_type objects there are, and more importantly we don't
122 * know where each btf_type is located in the type section.
124 * Without knowing the location of each type_id, most verifications
125 * cannot be done. e.g. an earlier btf_type may refer to a later
126 * btf_type (recall the "const void *" above), so we cannot
127 * check this type-reference in the first pass.
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
 * The main focus is to resolve a btf_type that is referring
 * to another type.
137 * We have to ensure the referring type:
138 * 1) does exist in the BTF (i.e. in btf->types[])
139 * 2) does not cause a loop:
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
151 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects back edges.
154 * During resolve (or DFS), different C types have different
155 * "RESOLVED" conditions.
157 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
158 * members because a member is always referring to another
159 * type. A struct's member can be treated as "RESOLVED" if
160 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		const struct A *a;
 *	};
168 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
169 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
170 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
177 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
178 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
179 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
180 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
181 #define BITS_ROUNDUP_BYTES(bits) \
182 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
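
/* For example, for a 12-bit value starting at bit 3 (a worked sketch):
 *	BITS_PER_BYTE_MASKED(3)  == 3	(3 bits into the containing byte)
 *	BITS_ROUNDDOWN_BYTES(12) == 1	(12 bits fully cover one byte)
 *	BITS_ROUNDUP_BYTES(12)   == 2	(but need two bytes of storage)
 */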
184 #define BTF_INFO_MASK 0x9f00ffff
185 #define BTF_INT_MASK 0x0fffffff
186 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
187 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
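
/* A sketch of the 'struct btf_type' info word layout that BTF_INFO_MASK
 * guards (from uapi/linux/btf.h):
 *
 *	bits  0-15: vlen (e.g. # of struct's members)
 *	bits 16-23: unused
 *	bits 24-28: kind (e.g. int, ptr, array...etc)
 *	bits 29-30: unused
 *	bit     31: kind_flag
 *
 * hence BTF_INFO_MASK == 0x9f00ffff; a set bit outside the mask is
 * invalid. BTF_INT_MASK similarly rejects the top nibble of the int
 * kind's extra __u32, which holds its encoding/offset/bits fields.
 */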
/* 16MB is enough for 64k structs with 16 members each, plus
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
193 #define BTF_MAX_SIZE (16 * 1024 * 1024)
195 #define for_each_member_from(i, from, struct_type, member) \
196 for (i = from, member = btf_type_member(struct_type) + from; \
	     i < btf_type_vlen(struct_type);		\
	     i++, member++)
200 #define for_each_vsi_from(i, from, struct_type, member) \
201 for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
	     i < btf_type_vlen(struct_type);		\
	     i++, member++)
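
/* For illustration, a typical use of the macros above (a sketch; the
 * resolve logic below resumes a member walk from v->next_member):
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, v->next_member, struct_type, member) {
 *		... examine member->type / member->offset ...
 *	}
 */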
206 DEFINE_SPINLOCK(btf_idr_lock);
208 enum btf_kfunc_hook {
209 BTF_KFUNC_HOOK_COMMON,
212 BTF_KFUNC_HOOK_STRUCT_OPS,
213 BTF_KFUNC_HOOK_TRACING,
214 BTF_KFUNC_HOOK_SYSCALL,
215 BTF_KFUNC_HOOK_FMODRET,
216 BTF_KFUNC_HOOK_CGROUP,
217 BTF_KFUNC_HOOK_SCHED_ACT,
218 BTF_KFUNC_HOOK_SK_SKB,
219 BTF_KFUNC_HOOK_SOCKET_FILTER,
221 BTF_KFUNC_HOOK_NETFILTER,
222 BTF_KFUNC_HOOK_KPROBE,
227 BTF_KFUNC_SET_MAX_CNT = 256,
228 BTF_DTOR_KFUNC_MAX_CNT = 256,
229 BTF_KFUNC_FILTER_MAX_CNT = 16,
232 struct btf_kfunc_hook_filter {
233 btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
237 struct btf_kfunc_set_tab {
238 struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
239 struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
242 struct btf_id_dtor_kfunc_tab {
244 struct btf_id_dtor_kfunc dtors[];
247 struct btf_struct_ops_tab {
250 struct bpf_struct_ops_desc ops[];
255 struct btf_type **types;
260 struct btf_header hdr;
261 u32 nr_types; /* includes VOID for base BTF */
267 struct btf_kfunc_set_tab *kfunc_set_tab;
268 struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
269 struct btf_struct_metas *struct_meta_tab;
270 struct btf_struct_ops_tab *struct_ops_tab;
272 /* split BTF support */
273 struct btf *base_btf;
274 u32 start_id; /* first type ID in this BTF (0 for base BTF) */
275 u32 start_str_off; /* first string offset (0 for base BTF) */
276 char name[MODULE_NAME_LEN];
278 __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
281 enum verifier_phase {
286 struct resolve_vertex {
287 const struct btf_type *t;
299 RESOLVE_TBD, /* To Be Determined */
300 RESOLVE_PTR, /* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
				  * or array
				  */
306 #define MAX_RESOLVE_DEPTH 32
308 struct btf_sec_info {
313 struct btf_verifier_env {
316 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
317 struct bpf_verifier_log log;
320 enum verifier_phase phase;
321 enum resolve_mode resolve_mode;
324 static const char * const btf_kind_str[NR_BTF_KINDS] = {
325 [BTF_KIND_UNKN] = "UNKNOWN",
326 [BTF_KIND_INT] = "INT",
327 [BTF_KIND_PTR] = "PTR",
328 [BTF_KIND_ARRAY] = "ARRAY",
329 [BTF_KIND_STRUCT] = "STRUCT",
330 [BTF_KIND_UNION] = "UNION",
331 [BTF_KIND_ENUM] = "ENUM",
332 [BTF_KIND_FWD] = "FWD",
333 [BTF_KIND_TYPEDEF] = "TYPEDEF",
334 [BTF_KIND_VOLATILE] = "VOLATILE",
335 [BTF_KIND_CONST] = "CONST",
336 [BTF_KIND_RESTRICT] = "RESTRICT",
337 [BTF_KIND_FUNC] = "FUNC",
338 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
339 [BTF_KIND_VAR] = "VAR",
340 [BTF_KIND_DATASEC] = "DATASEC",
341 [BTF_KIND_FLOAT] = "FLOAT",
342 [BTF_KIND_DECL_TAG] = "DECL_TAG",
343 [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
344 [BTF_KIND_ENUM64] = "ENUM64",
347 const char *btf_type_str(const struct btf_type *t)
349 return btf_kind_str[BTF_INFO_KIND(t->info)];
352 /* Chunk size we use in safe copy of data to be shown. */
353 #define BTF_SHOW_OBJ_SAFE_SIZE 32
356 * This is the maximum size of a base type value (equivalent to a
357 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
362 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
365 #define BTF_SHOW_NAME_SIZE 80
368 * The suffix of a type that indicates it cannot alias another type when
369 * comparing BTF IDs for kfunc invocations.
371 #define NOCAST_ALIAS_SUFFIX "___init"
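
/* E.g. a kfunc argument typed 'struct nf_conn___init *' will not accept
 * a plain 'struct nf_conn *' (and vice versa), even though the part of
 * the name before "___" matches; the suffix marks the type as
 * non-aliasing for BTF ID comparison.
 */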
374 * Common data to all BTF show operations. Private show functions can add
375 * their own data to a structure containing a struct btf_show and consult it
376 * in the show callback. See btf_type_show() below.
 * One challenge with showing nested data is that we want to skip 0-valued
379 * data, but in order to figure out whether a nested object is all zeros
380 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
382 * for nonzero data, while the second actually does the display. The first
383 * pass is signalled by show->state.depth_check being set, and if we
384 * encounter a non-zero value we set show->state.depth_to_show to
385 * the depth at which we encountered it. When we have completed the
386 * first pass, we will know if anything needs to be displayed if
387 * depth_to_show > depth. See btf_[struct,array]_show() for the
388 * implementation of this.
 * Another problem is that we want to ensure the data for display is safe to
391 * access. To support this, the anonymous "struct {} obj" tracks the data
392 * object and our safe copy of it. We copy portions of the data needed
393 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
395 * traverse larger objects for display.
397 * The various data type show functions all start with a call to
398 * btf_show_start_type() which returns a pointer to the safe copy
399 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
400 * raw data itself). btf_show_obj_safe() is responsible for
401 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used here:
405 * - obj.head points to the start of the toplevel object for display
406 * - obj.size is the size of the toplevel object
407 * - obj.data points to the current point in the original data at
408 * which our safe data starts. obj.data will advance as we copy
409 * portions of the data.
411 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines if a new
 * copy_from_kernel_nofault() is needed.
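 *
 * A sketch of the safe-copy window maintained below:
 *
 *	obj.head                                    obj.head + obj.size
 *	|---------------------- original object ----------------------|
 *	        obj.data
 *	        |-- BTF_SHOW_OBJ_SAFE_SIZE window --|
 *
 * obj.safe holds a copy of the window; a pointer p inside the window
 * maps to obj.safe + (p - obj.data).
 */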
418 void *target; /* target of show operation (seq file, buffer) */
419 __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
420 const struct btf *btf;
421 /* below are used during iteration */
430 int status; /* non-zero for error */
431 const struct btf_type *type;
432 const struct btf_member *member;
433 char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
439 u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
443 struct btf_kind_operations {
444 s32 (*check_meta)(struct btf_verifier_env *env,
445 const struct btf_type *t,
447 int (*resolve)(struct btf_verifier_env *env,
448 const struct resolve_vertex *v);
449 int (*check_member)(struct btf_verifier_env *env,
450 const struct btf_type *struct_type,
451 const struct btf_member *member,
452 const struct btf_type *member_type);
453 int (*check_kflag_member)(struct btf_verifier_env *env,
454 const struct btf_type *struct_type,
455 const struct btf_member *member,
456 const struct btf_type *member_type);
457 void (*log_details)(struct btf_verifier_env *env,
458 const struct btf_type *t);
459 void (*show)(const struct btf *btf, const struct btf_type *t,
460 u32 type_id, void *data, u8 bits_offsets,
461 struct btf_show *show);
464 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
465 static struct btf_type btf_void;
467 static int btf_resolve(struct btf_verifier_env *env,
468 const struct btf_type *t, u32 type_id);
470 static int btf_func_check(struct btf_verifier_env *env,
471 const struct btf_type *t);
473 static bool btf_type_is_modifier(const struct btf_type *t)
	/* Some of these are not strictly C modifiers,
	 * but they are grouped into the same bucket:
	 *
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following t->type.
482 * ptr does not fall into this bucket
483 * because its size is always sizeof(void *).
485 switch (BTF_INFO_KIND(t->info)) {
486 case BTF_KIND_TYPEDEF:
487 case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
490 case BTF_KIND_TYPE_TAG:
497 bool btf_type_is_void(const struct btf_type *t)
499 return t == &btf_void;
502 static bool btf_type_is_datasec(const struct btf_type *t)
504 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
507 static bool btf_type_is_decl_tag(const struct btf_type *t)
509 return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
512 static bool btf_type_nosize(const struct btf_type *t)
514 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
515 btf_type_is_func(t) || btf_type_is_func_proto(t) ||
516 btf_type_is_decl_tag(t);
519 static bool btf_type_nosize_or_null(const struct btf_type *t)
521 return !t || btf_type_nosize(t);
524 static bool btf_type_is_decl_tag_target(const struct btf_type *t)
526 return btf_type_is_func(t) || btf_type_is_struct(t) ||
527 btf_type_is_var(t) || btf_type_is_typedef(t);
530 bool btf_is_vmlinux(const struct btf *btf)
532 return btf->kernel_btf && !btf->base_btf;
535 u32 btf_nr_types(const struct btf *btf)
540 total += btf->nr_types;
547 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
549 const struct btf_type *t;
553 total = btf_nr_types(btf);
554 for (i = 1; i < total; i++) {
555 t = btf_type_by_id(btf, i);
556 if (BTF_INFO_KIND(t->info) != kind)
559 tname = btf_name_by_offset(btf, t->name_off);
560 if (!strcmp(tname, name))
567 s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
573 btf = bpf_get_btf_vmlinux();
579 ret = btf_find_by_name_kind(btf, name, kind);
580 /* ret is never zero, since btf_find_by_name_kind returns
	 * a positive btf_id or a negative error.
589 /* If name is not found in vmlinux's BTF then search in module's BTFs */
590 spin_lock_bh(&btf_idr_lock);
591 idr_for_each_entry(&btf_idr, btf, id) {
592 if (!btf_is_module(btf))
		/* linear search could be slow, hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
598 spin_unlock_bh(&btf_idr_lock);
599 ret = btf_find_by_name_kind(btf, name, kind);
605 spin_lock_bh(&btf_idr_lock);
607 spin_unlock_bh(&btf_idr_lock);
610 EXPORT_SYMBOL_GPL(bpf_find_btf_id);
612 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
615 const struct btf_type *t = btf_type_by_id(btf, id);
617 while (btf_type_is_modifier(t)) {
619 t = btf_type_by_id(btf, t->type);
628 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
631 const struct btf_type *t;
633 t = btf_type_skip_modifiers(btf, id, NULL);
634 if (!btf_type_is_ptr(t))
637 return btf_type_skip_modifiers(btf, t->type, res_id);
640 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
643 const struct btf_type *ptype;
645 ptype = btf_type_resolve_ptr(btf, id, res_id);
646 if (ptype && btf_type_is_func_proto(ptype))
/* Types that act only as a source, not as a sink or intermediate
 * type, when resolving.
 */
655 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
657 return btf_type_is_var(t) ||
658 btf_type_is_decl_tag(t) ||
659 btf_type_is_datasec(t);
662 /* What types need to be resolved?
664 * btf_type_is_modifier() is an obvious one.
666 * btf_type_is_struct() because its member refers to
667 * another type (through member->type).
669 * btf_type_is_var() because the variable refers to
670 * another type. btf_type_is_datasec() holds multiple
671 * btf_type_is_var() types that need resolving.
673 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of a struct, with the same
 * member type repeated array->nelems times.
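 *
 * E.g. 'u32 a[4]' resolves much like
 *
 *	struct { u32 m0, m1, m2, m3; };
 *
 * would: one element type (u32) repeated nelems (4) times.
 */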
678 static bool btf_type_needs_resolve(const struct btf_type *t)
680 return btf_type_is_modifier(t) ||
681 btf_type_is_ptr(t) ||
682 btf_type_is_struct(t) ||
683 btf_type_is_array(t) ||
684 btf_type_is_var(t) ||
685 btf_type_is_func(t) ||
686 btf_type_is_decl_tag(t) ||
687 btf_type_is_datasec(t);
690 /* t->size can be used */
691 static bool btf_type_has_size(const struct btf_type *t)
693 switch (BTF_INFO_KIND(t->info)) {
695 case BTF_KIND_STRUCT:
698 case BTF_KIND_DATASEC:
700 case BTF_KIND_ENUM64:
707 static const char *btf_int_encoding_str(u8 encoding)
711 else if (encoding == BTF_INT_SIGNED)
713 else if (encoding == BTF_INT_CHAR)
715 else if (encoding == BTF_INT_BOOL)
721 static u32 btf_type_int(const struct btf_type *t)
723 return *(u32 *)(t + 1);
726 static const struct btf_array *btf_type_array(const struct btf_type *t)
728 return (const struct btf_array *)(t + 1);
731 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
733 return (const struct btf_enum *)(t + 1);
736 static const struct btf_var *btf_type_var(const struct btf_type *t)
738 return (const struct btf_var *)(t + 1);
741 static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
743 return (const struct btf_decl_tag *)(t + 1);
746 static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
748 return (const struct btf_enum64 *)(t + 1);
751 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
753 return kind_ops[BTF_INFO_KIND(t->info)];
756 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
758 if (!BTF_STR_OFFSET_VALID(offset))
761 while (offset < btf->start_str_off)
764 offset -= btf->start_str_off;
765 return offset < btf->hdr.str_len;
768 static bool __btf_name_char_ok(char c, bool first)
770 if ((first ? !isalpha(c) :
778 const char *btf_str_by_offset(const struct btf *btf, u32 offset)
780 while (offset < btf->start_str_off)
783 offset -= btf->start_str_off;
784 if (offset < btf->hdr.str_len)
785 return &btf->strings[offset];
790 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
792 /* offset must be valid */
793 const char *src = btf_str_by_offset(btf, offset);
794 const char *src_limit;
796 if (!__btf_name_char_ok(*src, true))
799 /* set a limit on identifier length */
800 src_limit = src + KSYM_NAME_LEN;
802 while (*src && src < src_limit) {
803 if (!__btf_name_char_ok(*src, false))
811 /* Allow any printable character in DATASEC names */
812 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
814 /* offset must be valid */
815 const char *src = btf_str_by_offset(btf, offset);
816 const char *src_limit;
821 /* set a limit on identifier length */
822 src_limit = src + KSYM_NAME_LEN;
823 while (*src && src < src_limit) {
832 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
839 name = btf_str_by_offset(btf, offset);
840 return name ?: "(invalid-name-offset)";
843 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
845 return btf_str_by_offset(btf, offset);
848 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
850 while (type_id < btf->start_id)
853 type_id -= btf->start_id;
854 if (type_id >= btf->nr_types)
856 return btf->types[type_id];
858 EXPORT_SYMBOL_GPL(btf_type_by_id);
 * A regular int is not a bitfield and must be either
 * u8/u16/u32/u64 or __int128.
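 *
 * E.g. an int with BTF_INT_BITS() == 32 and BTF_INT_OFFSET() == 0 is
 * regular (a u32), while one with BTF_INT_BITS() == 12 or a non-zero
 * BTF_INT_OFFSET() is not.
 */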
864 static bool btf_type_int_is_regular(const struct btf_type *t)
866 u8 nr_bits, nr_bytes;
869 int_data = btf_type_int(t);
870 nr_bits = BTF_INT_BITS(int_data);
871 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
872 if (BITS_PER_BYTE_MASKED(nr_bits) ||
873 BTF_INT_OFFSET(int_data) ||
874 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
875 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
876 nr_bytes != (2 * sizeof(u64)))) {
 * Check that the given struct member is a regular int with the expected
 * offset and size.
 */
887 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
888 const struct btf_member *m,
889 u32 expected_offset, u32 expected_size)
891 const struct btf_type *t;
896 t = btf_type_id_size(btf, &id, NULL);
897 if (!t || !btf_type_is_int(t))
900 int_data = btf_type_int(t);
901 nr_bits = BTF_INT_BITS(int_data);
902 if (btf_type_kflag(s)) {
903 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
904 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
		/* if kflag is set, the int should be a regular int and
		 * the bit offset should be at a byte boundary.
		 */
909 return !bitfield_size &&
910 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
911 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
914 if (BTF_INT_OFFSET(int_data) ||
915 BITS_PER_BYTE_MASKED(m->offset) ||
916 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
917 BITS_PER_BYTE_MASKED(nr_bits) ||
918 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
924 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
925 static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
928 const struct btf_type *t = btf_type_by_id(btf, id);
930 while (btf_type_is_modifier(t) &&
931 BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
932 t = btf_type_by_id(btf, t->type);
938 #define BTF_SHOW_MAX_ITER 10
940 #define BTF_KIND_BIT(kind) (1ULL << kind)
943 * Populate show->state.name with type name information.
944 * Format of type name is
946 * [.member_name = ] (type_name)
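 *
 * e.g. (a sketch) a struct member "pid" of typedef type "pid_t" might
 * be rendered as:
 *
 *	.pid = (pid_t)
 */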
948 static const char *btf_show_name(struct btf_show *show)
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
951 const char *array_suffixes = "[][][][][][][][][][]";
952 const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
954 const char *ptr_suffixes = "**********";
955 const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
956 const char *name = NULL, *prefix = "", *parens = "";
957 const struct btf_member *m = show->state.member;
958 const struct btf_type *t;
959 const struct btf_array *array;
960 u32 id = show->state.type_id;
961 const char *member = NULL;
962 bool show_member = false;
966 show->state.name[0] = '\0';
969 * Don't show type name if we're showing an array member;
	 * in that case we show the array type, so we don't need to repeat
	 * ourselves for each member.
	 */
973 if (show->state.array_member)
976 /* Retrieve member name, if any. */
978 member = btf_name_by_offset(show->btf, m->name_off);
979 show_member = strlen(member) > 0;
984 * Start with type_id, as we have resolved the struct btf_type *
985 * via btf_modifier_show() past the parent typedef to the child
986 * struct, int etc it is defined as. In such cases, the type_id
987 * still represents the starting type while the struct btf_type *
988 * in our show->state points at the resolved type of the typedef.
990 t = btf_type_by_id(show->btf, id);
995 * The goal here is to build up the right number of pointer and
996 * array suffixes while ensuring the type name for a typedef
997 * is represented. Along the way we accumulate a list of
998 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for a struct; we will just display the pointer value.
1002 * We also want to accumulate the right number of pointer or array
1003 * indices in the format string while iterating until we get to
1004 * the typedef/pointee/array member target type.
1006 * We start by pointing at the end of pointer and array suffix
1007 * strings; as we accumulate pointers and arrays we move the pointer
1008 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. Up to BTF_SHOW_MAX_ITER levels of nesting
	 * of pointers and/or arrays and typedefs are supported as a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to, so that we can add parentheses if it is a
1014 * "typedef struct" etc.
1016 for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
1018 switch (BTF_INFO_KIND(t->info)) {
1019 case BTF_KIND_TYPEDEF:
1021 name = btf_name_by_offset(show->btf,
1023 kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
1026 case BTF_KIND_ARRAY:
1027 kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
1031 array = btf_type_array(t);
1032 if (array_suffix > array_suffixes)
1037 kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1038 if (ptr_suffix > ptr_suffixes)
1048 t = btf_type_skip_qualifiers(show->btf, id);
1050 /* We may not be able to represent this type; bail to be safe */
1051 if (i == BTF_SHOW_MAX_ITER)
1055 name = btf_name_by_offset(show->btf, t->name_off);
1057 switch (BTF_INFO_KIND(t->info)) {
1058 case BTF_KIND_STRUCT:
1059 case BTF_KIND_UNION:
1060 prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1062 /* if it's an array of struct/union, parens is already set */
1063 if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1067 case BTF_KIND_ENUM64:
1074 /* pointer does not require parens */
1075 if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1077 /* typedef does not require struct/union/enum prefix */
1078 if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1084 /* Even if we don't want type name info, we want parentheses etc */
1085 if (show->flags & BTF_SHOW_NONAME)
1086 snprintf(show->state.name, sizeof(show->state.name), "%s",
1089 snprintf(show->state.name, sizeof(show->state.name),
1090 "%s%s%s(%s%s%s%s%s%s)%s",
1091 /* first 3 strings comprise ".member = " */
1092 show_member ? "." : "",
1093 show_member ? member : "",
1094 show_member ? " = " : "",
1095 /* ...next is our prefix (struct, enum, etc) */
1097 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1098 /* ...this is the type name itself */
1100 /* ...suffixed by the appropriate '*', '[]' suffixes */
1101 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1102 array_suffix, parens);
1104 return show->state.name;
1107 static const char *__btf_show_indent(struct btf_show *show)
1109 const char *indents = " ";
1110 const char *indent = &indents[strlen(indents)];
1112 if ((indent - show->state.depth) >= indents)
1113 return indent - show->state.depth;
1117 static const char *btf_show_indent(struct btf_show *show)
1119 return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1122 static const char *btf_show_newline(struct btf_show *show)
1124 return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1127 static const char *btf_show_delim(struct btf_show *show)
1129 if (show->state.depth == 0)
1132 if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1133 BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1139 __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1143 if (!show->state.depth_check) {
1144 va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
1150 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1151 * format specifiers to the format specifier passed in; these do the work of
1152 * adding indentation, delimiters etc while the caller simply has to specify
1153 * the type value(s) in the format specifier + value(s).
1155 #define btf_show_type_value(show, fmt, value) \
1157 if ((value) != (__typeof__(value))0 || \
1158 (show->flags & BTF_SHOW_ZERO) || \
1159 show->state.depth == 0) { \
1160 btf_show(show, "%s%s" fmt "%s%s", \
1161 btf_show_indent(show), \
1162 btf_show_name(show), \
1163 value, btf_show_delim(show), \
1164 btf_show_newline(show)); \
1165 if (show->state.depth > show->state.depth_to_show) \
1166 show->state.depth_to_show = show->state.depth; \
1170 #define btf_show_type_values(show, fmt, ...) \
1172 btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
1173 btf_show_name(show), \
1174 __VA_ARGS__, btf_show_delim(show), \
1175 btf_show_newline(show)); \
1176 if (show->state.depth > show->state.depth_to_show) \
1177 show->state.depth_to_show = show->state.depth; \
1180 /* How much is left to copy to safe buffer after @data? */
1181 static int btf_show_obj_size_left(struct btf_show *show, void *data)
1183 return show->obj.head + show->obj.size - data;
1186 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1187 static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1189 return data >= show->obj.data &&
1190 (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
 * If the object pointed to by @data of @size falls within our safe buffer,
 * return the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * up to date.
 */
1199 static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1201 if (btf_show_obj_is_safe(show, data, size))
1202 return show->obj.safe + (data - show->obj.data);
1207 * Return a safe-to-access version of data pointed to by @data.
1208 * We do this by copying the relevant amount of information
1209 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1211 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1212 * safe copy is needed.
1214 * Otherwise we need to determine if we have the required amount
 * of data, determined by the @data pointer and the size of the
 * largest base type we can encounter (represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1218 * that we will be able to print some of the current object,
1219 * and if more is needed a copy will be triggered.
1220 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies may be needed as we iterate
 * over their members.
1224 * btf_show_obj_safe() is used to return a safe buffer for
1225 * btf_show_start_type(); this ensures that as we recurse into
1226 * nested types we always have safe data for the given type.
1227 * This approach is somewhat wasteful; it's possible for example
1228 * that when iterating over a large union we'll end up copying the
1229 * same data repeatedly, but the goal is safety not performance.
1230 * We use stack data as opposed to per-CPU buffers because the
1231 * iteration over a type can take some time, and preemption handling
1232 * would greatly complicate use of the safe buffer.
1234 static void *btf_show_obj_safe(struct btf_show *show,
1235 const struct btf_type *t,
1238 const struct btf_type *rt;
1239 int size_left, size;
1242 if (show->flags & BTF_SHOW_UNSAFE)
1245 rt = btf_resolve_size(show->btf, t, &size);
1247 show->state.status = PTR_ERR(rt);
	 * Is this the toplevel object? If so, set total object size and
1253 * initialize pointers. Otherwise check if we still fall within
1254 * our safe object data.
1256 if (show->state.depth == 0) {
1257 show->obj.size = size;
1258 show->obj.head = data;
1261 * If the size of the current object is > our remaining
1262 * safe buffer we _may_ need to do a new copy. However
		 * consider the case of a nested struct; its size pushes
1264 * us over the safe buffer limit, but showing any individual
1265 * struct members does not. In such cases, we don't need
1266 * to initiate a fresh copy yet; however we definitely need
1267 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1268 * in our buffer, regardless of the current object size.
1269 * The logic here is that as we resolve types we will
1270 * hit a base type at some point, and we need to be sure
1271 * the next chunk of data is safely available to display
1272 * that type info safely. We cannot rely on the size of
1273 * the current object here because it may be much larger
1274 * than our current buffer (e.g. task_struct is 8k).
1275 * All we want to do here is ensure that we can print the
1276 * next basic type, which we can if either
1277 * - the current type size is within the safe buffer; or
		 *   - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *     the safe buffer.
		 */
1281 safe = __btf_show_obj_safe(show, data,
1283 BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1287 * We need a new copy to our safe object, either because we haven't
1288 * yet copied and are initializing safe data, or because the data
1289 * we want falls outside the boundaries of the safe object.
1292 size_left = btf_show_obj_size_left(show, data);
1293 if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1294 size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1295 show->state.status = copy_from_kernel_nofault(show->obj.safe,
1297 if (!show->state.status) {
1298 show->obj.data = data;
1299 safe = show->obj.safe;
1307 * Set the type we are starting to show and return a safe data pointer
1308 * to be used for showing the associated data.
1310 static void *btf_show_start_type(struct btf_show *show,
1311 const struct btf_type *t,
1312 u32 type_id, void *data)
1314 show->state.type = t;
1315 show->state.type_id = type_id;
1316 show->state.name[0] = '\0';
1318 return btf_show_obj_safe(show, t, data);
1321 static void btf_show_end_type(struct btf_show *show)
1323 show->state.type = NULL;
1324 show->state.type_id = 0;
1325 show->state.name[0] = '\0';
1328 static void *btf_show_start_aggr_type(struct btf_show *show,
1329 const struct btf_type *t,
1330 u32 type_id, void *data)
1332 void *safe_data = btf_show_start_type(show, t, type_id, data);
1337 btf_show(show, "%s%s%s", btf_show_indent(show),
1338 btf_show_name(show),
1339 btf_show_newline(show));
1340 show->state.depth++;
1344 static void btf_show_end_aggr_type(struct btf_show *show,
1347 show->state.depth--;
1348 btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1349 btf_show_delim(show), btf_show_newline(show));
1350 btf_show_end_type(show);
1353 static void btf_show_start_member(struct btf_show *show,
1354 const struct btf_member *m)
1356 show->state.member = m;
1359 static void btf_show_start_array_member(struct btf_show *show)
1361 show->state.array_member = 1;
1362 btf_show_start_member(show, NULL);
1365 static void btf_show_end_member(struct btf_show *show)
1367 show->state.member = NULL;
1370 static void btf_show_end_array_member(struct btf_show *show)
1372 show->state.array_member = 0;
1373 btf_show_end_member(show);
1376 static void *btf_show_start_array_type(struct btf_show *show,
1377 const struct btf_type *t,
1382 show->state.array_encoding = array_encoding;
1383 show->state.array_terminated = 0;
1384 return btf_show_start_aggr_type(show, t, type_id, data);
1387 static void btf_show_end_array_type(struct btf_show *show)
1389 show->state.array_encoding = 0;
1390 show->state.array_terminated = 0;
1391 btf_show_end_aggr_type(show, "]");
1394 static void *btf_show_start_struct_type(struct btf_show *show,
1395 const struct btf_type *t,
1399 return btf_show_start_aggr_type(show, t, type_id, data);
1402 static void btf_show_end_struct_type(struct btf_show *show)
1404 btf_show_end_aggr_type(show, "}");
1407 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1408 const char *fmt, ...)
1412 va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
1417 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1418 const char *fmt, ...)
1420 struct bpf_verifier_log *log = &env->log;
1423 if (!bpf_verifier_log_needed(log))
1426 va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
1431 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1432 const struct btf_type *t,
1434 const char *fmt, ...)
1436 struct bpf_verifier_log *log = &env->log;
1437 struct btf *btf = env->btf;
1440 if (!bpf_verifier_log_needed(log))
1443 if (log->level == BPF_LOG_KERNEL) {
1444 /* btf verifier prints all types it is processing via
1445 * btf_verifier_log_type(..., fmt = NULL).
1446 * Skip those prints for in-kernel BTF verification.
1451 /* Skip logging when loading module BTF with mismatches permitted */
1452 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1456 __btf_verifier_log(log, "[%u] %s %s%s",
1459 __btf_name_by_offset(btf, t->name_off),
1460 log_details ? " " : "");
1463 btf_type_ops(t)->log_details(env, t);
1466 __btf_verifier_log(log, " ");
1467 va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
1472 __btf_verifier_log(log, "\n");
1475 #define btf_verifier_log_type(env, t, ...) \
1476 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1477 #define btf_verifier_log_basic(env, t, ...) \
1478 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1481 static void btf_verifier_log_member(struct btf_verifier_env *env,
1482 const struct btf_type *struct_type,
1483 const struct btf_member *member,
1484 const char *fmt, ...)
1486 struct bpf_verifier_log *log = &env->log;
1487 struct btf *btf = env->btf;
1490 if (!bpf_verifier_log_needed(log))
1493 if (log->level == BPF_LOG_KERNEL) {
1497 /* Skip logging when loading module BTF with mismatches permitted */
1498 if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1502 /* The CHECK_META phase already did a btf dump.
	 * If a member is logged again, it must have hit an error in
	 * parsing this member. It is useful to print out which
1506 * struct this member belongs to.
1508 if (env->phase != CHECK_META)
1509 btf_verifier_log_type(env, struct_type, NULL);
1511 if (btf_type_kflag(struct_type))
1512 __btf_verifier_log(log,
1513 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1514 __btf_name_by_offset(btf, member->name_off),
1516 BTF_MEMBER_BITFIELD_SIZE(member->offset),
1517 BTF_MEMBER_BIT_OFFSET(member->offset));
1519 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1520 __btf_name_by_offset(btf, member->name_off),
1521 member->type, member->offset);
1524 __btf_verifier_log(log, " ");
1525 va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
1530 __btf_verifier_log(log, "\n");
1534 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1535 const struct btf_type *datasec_type,
1536 const struct btf_var_secinfo *vsi,
1537 const char *fmt, ...)
1539 struct bpf_verifier_log *log = &env->log;
1542 if (!bpf_verifier_log_needed(log))
1544 if (log->level == BPF_LOG_KERNEL && !fmt)
1546 if (env->phase != CHECK_META)
1547 btf_verifier_log_type(env, datasec_type, NULL);
1549 __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1550 vsi->type, vsi->offset, vsi->size);
1552 __btf_verifier_log(log, " ");
1553 va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
1558 __btf_verifier_log(log, "\n");
1561 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1564 struct bpf_verifier_log *log = &env->log;
1565 const struct btf *btf = env->btf;
1566 const struct btf_header *hdr;
1568 if (!bpf_verifier_log_needed(log))
1571 if (log->level == BPF_LOG_KERNEL)
1574 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1575 __btf_verifier_log(log, "version: %u\n", hdr->version);
1576 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1577 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1578 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1579 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1580 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1581 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1582 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1585 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1587 struct btf *btf = env->btf;
1589 if (btf->types_size == btf->nr_types) {
1590 /* Expand 'types' array */
1592 struct btf_type **new_types;
1593 u32 expand_by, new_size;
1595 if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1596 btf_verifier_log(env, "Exceeded max num of types");
1600 expand_by = max_t(u32, btf->types_size >> 2, 16);
1601 new_size = min_t(u32, BTF_MAX_TYPE,
1602 btf->types_size + expand_by);
1604 new_types = kvcalloc(new_size, sizeof(*new_types),
1605 GFP_KERNEL | __GFP_NOWARN);
1609 if (btf->nr_types == 0) {
1610 if (!btf->base_btf) {
1611 /* lazily init VOID type */
1612 new_types[0] = &btf_void;
1616 memcpy(new_types, btf->types,
1617 sizeof(*btf->types) * btf->nr_types);
1621 btf->types = new_types;
1622 btf->types_size = new_size;
1625 btf->types[btf->nr_types++] = t;
1630 static int btf_alloc_id(struct btf *btf)
1634 idr_preload(GFP_KERNEL);
1635 spin_lock_bh(&btf_idr_lock);
1636 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1639 spin_unlock_bh(&btf_idr_lock);
1642 if (WARN_ON_ONCE(!id))
1645 return id > 0 ? 0 : id;
1648 static void btf_free_id(struct btf *btf)
1650 unsigned long flags;
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put() on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map. Some map_delete_elem()
	 * implementations may run with IRQs disabled, so
1658 * we need to use the _irqsave() version instead
1659 * of the _bh() version.
1661 spin_lock_irqsave(&btf_idr_lock, flags);
1662 idr_remove(&btf_idr, btf->id);
1663 spin_unlock_irqrestore(&btf_idr_lock, flags);
1666 static void btf_free_kfunc_set_tab(struct btf *btf)
1668 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1673 for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1674 kfree(tab->sets[hook]);
1676 btf->kfunc_set_tab = NULL;
1679 static void btf_free_dtor_kfunc_tab(struct btf *btf)
1681 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1686 btf->dtor_kfunc_tab = NULL;
1689 static void btf_struct_metas_free(struct btf_struct_metas *tab)
1695 for (i = 0; i < tab->cnt; i++)
1696 btf_record_free(tab->types[i].record);
1700 static void btf_free_struct_meta_tab(struct btf *btf)
1702 struct btf_struct_metas *tab = btf->struct_meta_tab;
1704 btf_struct_metas_free(tab);
1705 btf->struct_meta_tab = NULL;
1708 static void btf_free_struct_ops_tab(struct btf *btf)
1710 struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1716 for (i = 0; i < tab->cnt; i++)
1717 bpf_struct_ops_desc_release(&tab->ops[i]);
1720 btf->struct_ops_tab = NULL;
1723 static void btf_free(struct btf *btf)
1725 btf_free_struct_meta_tab(btf);
1726 btf_free_dtor_kfunc_tab(btf);
1727 btf_free_kfunc_set_tab(btf);
1728 btf_free_struct_ops_tab(btf);
1730 kvfree(btf->resolved_sizes);
1731 kvfree(btf->resolved_ids);
	/* vmlinux does not allocate btf->data, it simply points it at
	 * __start_BTF.
	 */
	if (!btf_is_vmlinux(btf))
		kvfree(btf->data);
1737 kvfree(btf->base_id_map);
1741 static void btf_free_rcu(struct rcu_head *rcu)
1743 struct btf *btf = container_of(rcu, struct btf, rcu);
1748 const char *btf_get_name(const struct btf *btf)
1753 void btf_get(struct btf *btf)
1755 refcount_inc(&btf->refcnt);
1758 void btf_put(struct btf *btf)
1760 if (btf && refcount_dec_and_test(&btf->refcnt)) {
1762 call_rcu(&btf->rcu, btf_free_rcu);
1766 struct btf *btf_base_btf(const struct btf *btf)
1768 return btf->base_btf;
1771 const struct btf_header *btf_header(const struct btf *btf)
1776 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
1778 btf->base_btf = (struct btf *)base_btf;
1779 btf->start_id = btf_nr_types(base_btf);
1780 btf->start_str_off = base_btf->hdr.str_len;
1783 static int env_resolve_init(struct btf_verifier_env *env)
1785 struct btf *btf = env->btf;
1786 u32 nr_types = btf->nr_types;
1787 u32 *resolved_sizes = NULL;
1788 u32 *resolved_ids = NULL;
1789 u8 *visit_states = NULL;
1791 resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1792 GFP_KERNEL | __GFP_NOWARN);
1793 if (!resolved_sizes)
1796 resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1797 GFP_KERNEL | __GFP_NOWARN);
1801 visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1802 GFP_KERNEL | __GFP_NOWARN);
1806 btf->resolved_sizes = resolved_sizes;
1807 btf->resolved_ids = resolved_ids;
1808 env->visit_states = visit_states;
1813 kvfree(resolved_sizes);
1814 kvfree(resolved_ids);
1815 kvfree(visit_states);
1819 static void btf_verifier_env_free(struct btf_verifier_env *env)
1821 kvfree(env->visit_states);
1825 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1826 const struct btf_type *next_type)
1828 switch (env->resolve_mode) {
1830 /* int, enum or void is a sink */
1831 return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
1836 return !btf_type_is_modifier(next_type) &&
1837 !btf_type_is_ptr(next_type);
1838 case RESOLVE_STRUCT_OR_ARRAY:
1839 /* int, enum, void, ptr, func or func_proto is a sink
1840 * for struct and array
1842 return !btf_type_is_modifier(next_type) &&
1843 !btf_type_is_array(next_type) &&
1844 !btf_type_is_struct(next_type);
1850 static bool env_type_is_resolved(const struct btf_verifier_env *env,
1853 /* base BTF types should be resolved by now */
1854 if (type_id < env->btf->start_id)
1857 return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1860 static int env_stack_push(struct btf_verifier_env *env,
1861 const struct btf_type *t, u32 type_id)
1863 const struct btf *btf = env->btf;
1864 struct resolve_vertex *v;
1866 if (env->top_stack == MAX_RESOLVE_DEPTH)
1869 if (type_id < btf->start_id
1870 || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1873 env->visit_states[type_id - btf->start_id] = VISITED;
1875 v = &env->stack[env->top_stack++];
1877 v->type_id = type_id;
1880 if (env->resolve_mode == RESOLVE_TBD) {
1881 if (btf_type_is_ptr(t))
1882 env->resolve_mode = RESOLVE_PTR;
1883 else if (btf_type_is_struct(t) || btf_type_is_array(t))
1884 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1890 static void env_stack_set_next_member(struct btf_verifier_env *env,
1893 env->stack[env->top_stack - 1].next_member = next_member;
1896 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1897 u32 resolved_type_id,
1900 u32 type_id = env->stack[--(env->top_stack)].type_id;
1901 struct btf *btf = env->btf;
1903 type_id -= btf->start_id; /* adjust to local type id */
1904 btf->resolved_sizes[type_id] = resolved_size;
1905 btf->resolved_ids[type_id] = resolved_type_id;
1906 env->visit_states[type_id] = RESOLVED;
1909 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1911 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1914 /* Resolve the size of a passed-in "type"
1916 * type: is an array (e.g. u32 array[x][y])
1917 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1918 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
1919 * corresponds to the return type.
1921 * *elem_id: id of u32
1922 * *total_nelems: (x * y). Hence, individual elem size is
1923 * (*type_size / *total_nelems)
1924 * *type_id: id of type if it's changed within the function, 0 if not
1926 * type: is not an array (e.g. const struct X)
1927 * return type: type "struct X"
1928 * *type_size: sizeof(struct X)
1929 * *elem_type: same as return type ("struct X")
1932 * *type_id: id of type if it's changed within the function, 0 if not
1934 static const struct btf_type *
1935 __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1936 u32 *type_size, const struct btf_type **elem_type,
1937 u32 *elem_id, u32 *total_nelems, u32 *type_id)
1939 const struct btf_type *array_type = NULL;
1940 const struct btf_array *array = NULL;
1941 u32 i, size, nelems = 1, id = 0;
1943 for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1944 switch (BTF_INFO_KIND(type->info)) {
1945 /* type->size can be used */
1947 case BTF_KIND_STRUCT:
1948 case BTF_KIND_UNION:
1950 case BTF_KIND_FLOAT:
1951 case BTF_KIND_ENUM64:
1956 size = sizeof(void *);
1960 case BTF_KIND_TYPEDEF:
1961 case BTF_KIND_VOLATILE:
1962 case BTF_KIND_CONST:
1963 case BTF_KIND_RESTRICT:
1964 case BTF_KIND_TYPE_TAG:
1966 type = btf_type_by_id(btf, type->type);
1969 case BTF_KIND_ARRAY:
1972 array = btf_type_array(type);
1973 if (nelems && array->nelems > U32_MAX / nelems)
1974 return ERR_PTR(-EINVAL);
1975 nelems *= array->nelems;
1976 type = btf_type_by_id(btf, array->type);
1979 /* type without size */
1981 return ERR_PTR(-EINVAL);
1985 return ERR_PTR(-EINVAL);
1988 if (nelems && size > U32_MAX / nelems)
1989 return ERR_PTR(-EINVAL);
1991 *type_size = nelems * size;
1993 *total_nelems = nelems;
1997 *elem_id = array ? array->type : 0;
2001 return array_type ? : type;
2004 const struct btf_type *
2005 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2008 return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
2011 static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
2013 while (type_id < btf->start_id)
2014 btf = btf->base_btf;
2016 return btf->resolved_ids[type_id - btf->start_id];
2019 /* The input param "type_id" must point to a needs_resolve type */
2020 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
2023 *type_id = btf_resolved_type_id(btf, *type_id);
2024 return btf_type_by_id(btf, *type_id);
2027 static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
2029 while (type_id < btf->start_id)
2030 btf = btf->base_btf;
2032 return btf->resolved_sizes[type_id - btf->start_id];
2035 const struct btf_type *btf_type_id_size(const struct btf *btf,
2036 u32 *type_id, u32 *ret_size)
2038 const struct btf_type *size_type;
2039 u32 size_type_id = *type_id;
2042 size_type = btf_type_by_id(btf, size_type_id);
2043 if (btf_type_nosize_or_null(size_type))
2046 if (btf_type_has_size(size_type)) {
2047 size = size_type->size;
2048 } else if (btf_type_is_array(size_type)) {
2049 size = btf_resolved_type_size(btf, size_type_id);
2050 } else if (btf_type_is_ptr(size_type)) {
2051 size = sizeof(void *);
2053 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
2054 !btf_type_is_var(size_type)))
2057 size_type_id = btf_resolved_type_id(btf, size_type_id);
2058 size_type = btf_type_by_id(btf, size_type_id);
2059 if (btf_type_nosize_or_null(size_type))
2061 else if (btf_type_has_size(size_type))
2062 size = size_type->size;
2063 else if (btf_type_is_array(size_type))
2064 size = btf_resolved_type_size(btf, size_type_id);
2065 else if (btf_type_is_ptr(size_type))
2066 size = sizeof(void *);
2071 *type_id = size_type_id;
2078 static int btf_df_check_member(struct btf_verifier_env *env,
2079 const struct btf_type *struct_type,
2080 const struct btf_member *member,
2081 const struct btf_type *member_type)
2083 btf_verifier_log_basic(env, struct_type,
2084 "Unsupported check_member");
2088 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
2089 const struct btf_type *struct_type,
2090 const struct btf_member *member,
2091 const struct btf_type *member_type)
2093 btf_verifier_log_basic(env, struct_type,
2094 "Unsupported check_kflag_member");
/* Used for ptr, array, struct/union and float type members.
 * int, enum and modifier types have their specific callback functions.
 */
2101 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2102 const struct btf_type *struct_type,
2103 const struct btf_member *member,
2104 const struct btf_type *member_type)
2106 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2107 btf_verifier_log_member(env, struct_type, member,
2108 "Invalid member bitfield_size");
2112 /* bitfield size is 0, so member->offset represents bit offset only.
	 * It is safe to call the non-kflag check_member variants.
2115 return btf_type_ops(member_type)->check_member(env, struct_type,
2120 static int btf_df_resolve(struct btf_verifier_env *env,
2121 const struct resolve_vertex *v)
2123 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2127 static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2128 u32 type_id, void *data, u8 bits_offsets,
2129 struct btf_show *show)
2131 btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2134 static int btf_int_check_member(struct btf_verifier_env *env,
2135 const struct btf_type *struct_type,
2136 const struct btf_member *member,
2137 const struct btf_type *member_type)
2139 u32 int_data = btf_type_int(member_type);
2140 u32 struct_bits_off = member->offset;
2141 u32 struct_size = struct_type->size;
2145 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2146 btf_verifier_log_member(env, struct_type, member,
2147 "bits_offset exceeds U32_MAX");
2151 struct_bits_off += BTF_INT_OFFSET(int_data);
2152 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2153 nr_copy_bits = BTF_INT_BITS(int_data) +
2154 BITS_PER_BYTE_MASKED(struct_bits_off);
2156 if (nr_copy_bits > BITS_PER_U128) {
2157 btf_verifier_log_member(env, struct_type, member,
2158 "nr_copy_bits exceeds 128");
2162 if (struct_size < bytes_offset ||
2163 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2164 btf_verifier_log_member(env, struct_type, member,
2165 "Member exceeds struct_size");
2172 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2173 const struct btf_type *struct_type,
2174 const struct btf_member *member,
2175 const struct btf_type *member_type)
2177 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2178 u32 int_data = btf_type_int(member_type);
2179 u32 struct_size = struct_type->size;
2182 /* a regular int type is required for the kflag int member */
2183 if (!btf_type_int_is_regular(member_type)) {
2184 btf_verifier_log_member(env, struct_type, member,
2185 "Invalid member base type");
2189 /* check sanity of bitfield size */
2190 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2191 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2192 nr_int_data_bits = BTF_INT_BITS(int_data);
		/* Not a bitfield member, member offset must be at a byte
		 * boundary.
		 */
2197 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2198 btf_verifier_log_member(env, struct_type, member,
2199 "Invalid member offset");
2203 nr_bits = nr_int_data_bits;
2204 } else if (nr_bits > nr_int_data_bits) {
2205 btf_verifier_log_member(env, struct_type, member,
2206 "Invalid member bitfield_size");
2210 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2211 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2212 if (nr_copy_bits > BITS_PER_U128) {
2213 btf_verifier_log_member(env, struct_type, member,
2214 "nr_copy_bits exceeds 128");
2218 if (struct_size < bytes_offset ||
2219 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2220 btf_verifier_log_member(env, struct_type, member,
2221 "Member exceeds struct_size");
2228 static s32 btf_int_check_meta(struct btf_verifier_env *env,
2229 const struct btf_type *t,
2232 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2235 if (meta_left < meta_needed) {
2236 btf_verifier_log_basic(env, t,
2237 "meta_left:%u meta_needed:%u",
2238 meta_left, meta_needed);
2242 if (btf_type_vlen(t)) {
2243 btf_verifier_log_type(env, t, "vlen != 0");
2247 if (btf_type_kflag(t)) {
2248 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2252 int_data = btf_type_int(t);
2253 if (int_data & ~BTF_INT_MASK) {
2254 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2259 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2261 if (nr_bits > BITS_PER_U128) {
2262 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2267 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2268 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2273 * Only one of the encoding bits is allowed and it
2274 * should be sufficient for the pretty print purpose (i.e. decoding).
2275 * Multiple bits can be allowed later if it is found
2276 * to be insufficient.
2278 encoding = BTF_INT_ENCODING(int_data);
2280 encoding != BTF_INT_SIGNED &&
2281 encoding != BTF_INT_CHAR &&
2282 encoding != BTF_INT_BOOL) {
2283 btf_verifier_log_type(env, t, "Unsupported encoding");
2287 btf_verifier_log_type(env, t, NULL);
2292 static void btf_int_log(struct btf_verifier_env *env,
2293 const struct btf_type *t)
2295 int int_data = btf_type_int(t);
2297 btf_verifier_log(env,
2298 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2299 t->size, BTF_INT_OFFSET(int_data),
2300 BTF_INT_BITS(int_data),
2301 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2304 static void btf_int128_print(struct btf_show *show, void *data)
2306 /* data points to a __int128 number.
2308 * int128_num = *(__int128 *)data;
2309 * The formulas below show what upper_num and lower_num represent:
2310 * upper_num = int128_num >> 64;
2311 * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2313 u64 upper_num, lower_num;
2315 #ifdef __BIG_ENDIAN_BITFIELD
2316 upper_num = *(u64 *)data;
2317 lower_num = *(u64 *)(data + 8);
2319 upper_num = *(u64 *)(data + 8);
2320 lower_num = *(u64 *)data;
2323 btf_show_type_value(show, "0x%llx", lower_num);
2325 btf_show_type_values(show, "0x%llx%016llx", upper_num,
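/* Worked example for the split above (illustrative, little endian):
 *
 *   unsigned __int128 v = ((unsigned __int128)0xAABB << 64) | 0xCCDD;
 *   u64 lower_num = (u64)v;         // 0xCCDD
 *   u64 upper_num = (u64)(v >> 64); // 0xAABB
 *
 * btf_int128_print() would then emit "0xaabb000000000000ccdd".
 */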
2329 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2330 u16 right_shift_bits)
2332 u64 upper_num, lower_num;
2334 #ifdef __BIG_ENDIAN_BITFIELD
2335 upper_num = print_num[0];
2336 lower_num = print_num[1];
2338 upper_num = print_num[1];
2339 lower_num = print_num[0];
2342 /* shake out un-needed bits by shift/or operations */
2343 if (left_shift_bits >= 64) {
2344 upper_num = lower_num << (left_shift_bits - 64);
2347 upper_num = (upper_num << left_shift_bits) |
2348 (lower_num >> (64 - left_shift_bits));
2349 lower_num = lower_num << left_shift_bits;
2352 if (right_shift_bits >= 64) {
2353 lower_num = upper_num >> (right_shift_bits - 64);
2356 lower_num = (lower_num >> right_shift_bits) |
2357 (upper_num << (64 - right_shift_bits));
2358 upper_num = upper_num >> right_shift_bits;
2361 #ifdef __BIG_ENDIAN_BITFIELD
2362 print_num[0] = upper_num;
2363 print_num[1] = lower_num;
2365 print_num[0] = lower_num;
2366 print_num[1] = upper_num;
2370 static void btf_bitfield_show(void *data, u8 bits_offset,
2371 u8 nr_bits, struct btf_show *show)
2373 u16 left_shift_bits, right_shift_bits;
2376 u64 print_num[2] = {};
2378 nr_copy_bits = nr_bits + bits_offset;
2379 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2381 memcpy(print_num, data, nr_copy_bytes);
2383 #ifdef __BIG_ENDIAN_BITFIELD
2384 left_shift_bits = bits_offset;
2386 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2388 right_shift_bits = BITS_PER_U128 - nr_bits;
2390 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2391 btf_int128_print(show, print_num);
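/* Worked example (illustrative): extracting a 5-bit field stored at
 * bit offset 3 on little endian gives nr_copy_bits = 5 + 3 = 8, so
 *   left_shift_bits  = 128 - 8 = 120
 *   right_shift_bits = 128 - 5 = 123
 * The left shift discards the 120 bits above the field, and the right
 * shift then right-aligns the five field bits for btf_int128_print().
 */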
2395 static void btf_int_bits_show(const struct btf *btf,
2396 const struct btf_type *t,
2397 void *data, u8 bits_offset,
2398 struct btf_show *show)
2400 u32 int_data = btf_type_int(t);
2401 u8 nr_bits = BTF_INT_BITS(int_data);
2402 u8 total_bits_offset;
2405 * bits_offset is at most 7.
2406 * BTF_INT_OFFSET() cannot exceed 128 bits.
2408 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2409 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2410 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2411 btf_bitfield_show(data, bits_offset, nr_bits, show);
2414 static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2415 u32 type_id, void *data, u8 bits_offset,
2416 struct btf_show *show)
2418 u32 int_data = btf_type_int(t);
2419 u8 encoding = BTF_INT_ENCODING(int_data);
2420 bool sign = encoding & BTF_INT_SIGNED;
2421 u8 nr_bits = BTF_INT_BITS(int_data);
2424 safe_data = btf_show_start_type(show, t, type_id, data);
2428 if (bits_offset || BTF_INT_OFFSET(int_data) ||
2429 BITS_PER_BYTE_MASKED(nr_bits)) {
2430 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2436 btf_int128_print(show, safe_data);
2440 btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2442 btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2446 btf_show_type_value(show, "%d", *(s32 *)safe_data);
2448 btf_show_type_value(show, "%u", *(u32 *)safe_data);
2452 btf_show_type_value(show, "%d", *(s16 *)safe_data);
2454 btf_show_type_value(show, "%u", *(u16 *)safe_data);
2457 if (show->state.array_encoding == BTF_INT_CHAR) {
2458 /* check for null terminator */
2459 if (show->state.array_terminated)
2461 if (*(char *)data == '\0') {
2462 show->state.array_terminated = 1;
2465 if (isprint(*(char *)data)) {
2466 btf_show_type_value(show, "'%c'",
2467 *(char *)safe_data);
2472 btf_show_type_value(show, "%d", *(s8 *)safe_data);
2474 btf_show_type_value(show, "%u", *(u8 *)safe_data);
2477 btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2481 btf_show_end_type(show);
2484 static const struct btf_kind_operations int_ops = {
2485 .check_meta = btf_int_check_meta,
2486 .resolve = btf_df_resolve,
2487 .check_member = btf_int_check_member,
2488 .check_kflag_member = btf_int_check_kflag_member,
2489 .log_details = btf_int_log,
2490 .show = btf_int_show,
2493 static int btf_modifier_check_member(struct btf_verifier_env *env,
2494 const struct btf_type *struct_type,
2495 const struct btf_member *member,
2496 const struct btf_type *member_type)
2498 const struct btf_type *resolved_type;
2499 u32 resolved_type_id = member->type;
2500 struct btf_member resolved_member;
2501 struct btf *btf = env->btf;
2503 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2504 if (!resolved_type) {
2505 btf_verifier_log_member(env, struct_type, member,
2510 resolved_member = *member;
2511 resolved_member.type = resolved_type_id;
2513 return btf_type_ops(resolved_type)->check_member(env, struct_type,
2518 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2519 const struct btf_type *struct_type,
2520 const struct btf_member *member,
2521 const struct btf_type *member_type)
2523 const struct btf_type *resolved_type;
2524 u32 resolved_type_id = member->type;
2525 struct btf_member resolved_member;
2526 struct btf *btf = env->btf;
2528 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2529 if (!resolved_type) {
2530 btf_verifier_log_member(env, struct_type, member,
2535 resolved_member = *member;
2536 resolved_member.type = resolved_type_id;
2538 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2543 static int btf_ptr_check_member(struct btf_verifier_env *env,
2544 const struct btf_type *struct_type,
2545 const struct btf_member *member,
2546 const struct btf_type *member_type)
2548 u32 struct_size, struct_bits_off, bytes_offset;
2550 struct_size = struct_type->size;
2551 struct_bits_off = member->offset;
2552 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2554 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2555 btf_verifier_log_member(env, struct_type, member,
2556 "Member is not byte aligned");
2560 if (struct_size - bytes_offset < sizeof(void *)) {
2561 btf_verifier_log_member(env, struct_type, member,
2562 "Member exceeds struct_size");
2569 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2570 const struct btf_type *t,
2575 if (btf_type_vlen(t)) {
2576 btf_verifier_log_type(env, t, "vlen != 0");
2580 if (btf_type_kflag(t) && !btf_type_is_type_tag(t)) {
2581 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2585 if (!BTF_TYPE_ID_VALID(t->type)) {
2586 btf_verifier_log_type(env, t, "Invalid type_id");
2590 /* A typedef/type_tag type must have a valid name, while the other
2591 * ref types (volatile, const, restrict) must have a null name. */
2593 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2595 !btf_name_valid_identifier(env->btf, t->name_off)) {
2596 btf_verifier_log_type(env, t, "Invalid name");
2599 } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2600 value = btf_name_by_offset(env->btf, t->name_off);
2601 if (!value || !value[0]) {
2602 btf_verifier_log_type(env, t, "Invalid name");
2607 btf_verifier_log_type(env, t, "Invalid name");
2612 btf_verifier_log_type(env, t, NULL);
2617 static int btf_modifier_resolve(struct btf_verifier_env *env,
2618 const struct resolve_vertex *v)
2620 const struct btf_type *t = v->t;
2621 const struct btf_type *next_type;
2622 u32 next_type_id = t->type;
2623 struct btf *btf = env->btf;
2625 next_type = btf_type_by_id(btf, next_type_id);
2626 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2627 btf_verifier_log_type(env, v->t, "Invalid type_id");
2631 if (!env_type_is_resolve_sink(env, next_type) &&
2632 !env_type_is_resolved(env, next_type_id))
2633 return env_stack_push(env, next_type, next_type_id);
2635 /* Figure out the resolved next_type_id with size.
2636 * They will be stored in the current modifier's
2637 * resolved_ids and resolved_sizes so that they can
2638 * save us a few type-followings when used later (e.g. in
2641 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2642 if (env_type_is_resolved(env, next_type_id))
2643 next_type = btf_type_id_resolve(btf, &next_type_id);
2645 /* "typedef void new_void", "const void"...etc */
2646 if (!btf_type_is_void(next_type) &&
2647 !btf_type_is_fwd(next_type) &&
2648 !btf_type_is_func_proto(next_type)) {
2649 btf_verifier_log_type(env, v->t, "Invalid type_id");
2654 env_stack_pop_resolved(env, next_type_id, 0);
2659 static int btf_var_resolve(struct btf_verifier_env *env,
2660 const struct resolve_vertex *v)
2662 const struct btf_type *next_type;
2663 const struct btf_type *t = v->t;
2664 u32 next_type_id = t->type;
2665 struct btf *btf = env->btf;
2667 next_type = btf_type_by_id(btf, next_type_id);
2668 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2669 btf_verifier_log_type(env, v->t, "Invalid type_id");
2673 if (!env_type_is_resolve_sink(env, next_type) &&
2674 !env_type_is_resolved(env, next_type_id))
2675 return env_stack_push(env, next_type, next_type_id);
2677 if (btf_type_is_modifier(next_type)) {
2678 const struct btf_type *resolved_type;
2679 u32 resolved_type_id;
2681 resolved_type_id = next_type_id;
2682 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2684 if (btf_type_is_ptr(resolved_type) &&
2685 !env_type_is_resolve_sink(env, resolved_type) &&
2686 !env_type_is_resolved(env, resolved_type_id))
2687 return env_stack_push(env, resolved_type,
2691 /* We must resolve to something concrete at this point, no
2692 * forward types or similar that would resolve to a size of zero. */
2695 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2696 btf_verifier_log_type(env, v->t, "Invalid type_id");
2700 env_stack_pop_resolved(env, next_type_id, 0);
2705 static int btf_ptr_resolve(struct btf_verifier_env *env,
2706 const struct resolve_vertex *v)
2708 const struct btf_type *next_type;
2709 const struct btf_type *t = v->t;
2710 u32 next_type_id = t->type;
2711 struct btf *btf = env->btf;
2713 next_type = btf_type_by_id(btf, next_type_id);
2714 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2715 btf_verifier_log_type(env, v->t, "Invalid type_id");
2719 if (!env_type_is_resolve_sink(env, next_type) &&
2720 !env_type_is_resolved(env, next_type_id))
2721 return env_stack_push(env, next_type, next_type_id);
2723 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2724 * the modifier may have stopped resolving when it was resolved
2725 * to a ptr (last-resolved-ptr).
2727 * We now need to continue from the last-resolved-ptr to
2728 * ensure the last-resolved-ptr does not refer back to
2729 * the current ptr (t).
2731 if (btf_type_is_modifier(next_type)) {
2732 const struct btf_type *resolved_type;
2733 u32 resolved_type_id;
2735 resolved_type_id = next_type_id;
2736 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2738 if (btf_type_is_ptr(resolved_type) &&
2739 !env_type_is_resolve_sink(env, resolved_type) &&
2740 !env_type_is_resolved(env, resolved_type_id))
2741 return env_stack_push(env, resolved_type,
2745 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2746 if (env_type_is_resolved(env, next_type_id))
2747 next_type = btf_type_id_resolve(btf, &next_type_id);
2749 if (!btf_type_is_void(next_type) &&
2750 !btf_type_is_fwd(next_type) &&
2751 !btf_type_is_func_proto(next_type)) {
2752 btf_verifier_log_type(env, v->t, "Invalid type_id");
2757 env_stack_pop_resolved(env, next_type_id, 0);
2762 static void btf_modifier_show(const struct btf *btf,
2763 const struct btf_type *t,
2764 u32 type_id, void *data,
2765 u8 bits_offset, struct btf_show *show)
2767 if (btf->resolved_ids)
2768 t = btf_type_id_resolve(btf, &type_id);
2770 t = btf_type_skip_modifiers(btf, type_id, NULL);
2772 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2775 static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2776 u32 type_id, void *data, u8 bits_offset,
2777 struct btf_show *show)
2779 t = btf_type_id_resolve(btf, &type_id);
2781 btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2784 static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2785 u32 type_id, void *data, u8 bits_offset,
2786 struct btf_show *show)
2790 safe_data = btf_show_start_type(show, t, type_id, data);
2794 /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2795 if (show->flags & BTF_SHOW_PTR_RAW)
2796 btf_show_type_value(show, "0x%px", *(void **)safe_data);
2798 btf_show_type_value(show, "0x%p", *(void **)safe_data);
2799 btf_show_end_type(show);
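/* With default flags a pointer member is thus rendered via the hashed
 * "%p" format (e.g. "0x000000001a2b3c4d", value illustrative), while
 * setting BTF_SHOW_PTR_RAW in show->flags prints the raw address with
 * "%px".
 */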
2802 static void btf_ref_type_log(struct btf_verifier_env *env,
2803 const struct btf_type *t)
2805 btf_verifier_log(env, "type_id=%u", t->type);
2808 static const struct btf_kind_operations modifier_ops = {
2809 .check_meta = btf_ref_type_check_meta,
2810 .resolve = btf_modifier_resolve,
2811 .check_member = btf_modifier_check_member,
2812 .check_kflag_member = btf_modifier_check_kflag_member,
2813 .log_details = btf_ref_type_log,
2814 .show = btf_modifier_show,
2817 static const struct btf_kind_operations ptr_ops = {
2818 .check_meta = btf_ref_type_check_meta,
2819 .resolve = btf_ptr_resolve,
2820 .check_member = btf_ptr_check_member,
2821 .check_kflag_member = btf_generic_check_kflag_member,
2822 .log_details = btf_ref_type_log,
2823 .show = btf_ptr_show,
2826 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2827 const struct btf_type *t,
2830 if (btf_type_vlen(t)) {
2831 btf_verifier_log_type(env, t, "vlen != 0");
2836 btf_verifier_log_type(env, t, "type != 0");
2840 /* fwd type must have a valid name */
2842 !btf_name_valid_identifier(env->btf, t->name_off)) {
2843 btf_verifier_log_type(env, t, "Invalid name");
2847 btf_verifier_log_type(env, t, NULL);
2852 static void btf_fwd_type_log(struct btf_verifier_env *env,
2853 const struct btf_type *t)
2855 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2858 static const struct btf_kind_operations fwd_ops = {
2859 .check_meta = btf_fwd_check_meta,
2860 .resolve = btf_df_resolve,
2861 .check_member = btf_df_check_member,
2862 .check_kflag_member = btf_df_check_kflag_member,
2863 .log_details = btf_fwd_type_log,
2864 .show = btf_df_show,
2867 static int btf_array_check_member(struct btf_verifier_env *env,
2868 const struct btf_type *struct_type,
2869 const struct btf_member *member,
2870 const struct btf_type *member_type)
2872 u32 struct_bits_off = member->offset;
2873 u32 struct_size, bytes_offset;
2874 u32 array_type_id, array_size;
2875 struct btf *btf = env->btf;
2877 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2878 btf_verifier_log_member(env, struct_type, member,
2879 "Member is not byte aligned");
2883 array_type_id = member->type;
2884 btf_type_id_size(btf, &array_type_id, &array_size);
2885 struct_size = struct_type->size;
2886 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2887 if (struct_size - bytes_offset < array_size) {
2888 btf_verifier_log_member(env, struct_type, member,
2889 "Member exceeds struct_size");
2896 static s32 btf_array_check_meta(struct btf_verifier_env *env,
2897 const struct btf_type *t,
2900 const struct btf_array *array = btf_type_array(t);
2901 u32 meta_needed = sizeof(*array);
2903 if (meta_left < meta_needed) {
2904 btf_verifier_log_basic(env, t,
2905 "meta_left:%u meta_needed:%u",
2906 meta_left, meta_needed);
2910 /* array type should not have a name */
2912 btf_verifier_log_type(env, t, "Invalid name");
2916 if (btf_type_vlen(t)) {
2917 btf_verifier_log_type(env, t, "vlen != 0");
2921 if (btf_type_kflag(t)) {
2922 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2927 btf_verifier_log_type(env, t, "size != 0");
2931 /* Array elem type and index type cannot be in type void,
2932 * so !array->type and !array->index_type are not allowed.
2934 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2935 btf_verifier_log_type(env, t, "Invalid elem");
2939 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2940 btf_verifier_log_type(env, t, "Invalid index");
2944 btf_verifier_log_type(env, t, NULL);
2949 static int btf_array_resolve(struct btf_verifier_env *env,
2950 const struct resolve_vertex *v)
2952 const struct btf_array *array = btf_type_array(v->t);
2953 const struct btf_type *elem_type, *index_type;
2954 u32 elem_type_id, index_type_id;
2955 struct btf *btf = env->btf;
2958 /* Check array->index_type */
2959 index_type_id = array->index_type;
2960 index_type = btf_type_by_id(btf, index_type_id);
2961 if (btf_type_nosize_or_null(index_type) ||
2962 btf_type_is_resolve_source_only(index_type)) {
2963 btf_verifier_log_type(env, v->t, "Invalid index");
2967 if (!env_type_is_resolve_sink(env, index_type) &&
2968 !env_type_is_resolved(env, index_type_id))
2969 return env_stack_push(env, index_type, index_type_id);
2971 index_type = btf_type_id_size(btf, &index_type_id, NULL);
2972 if (!index_type || !btf_type_is_int(index_type) ||
2973 !btf_type_int_is_regular(index_type)) {
2974 btf_verifier_log_type(env, v->t, "Invalid index");
2978 /* Check array->type */
2979 elem_type_id = array->type;
2980 elem_type = btf_type_by_id(btf, elem_type_id);
2981 if (btf_type_nosize_or_null(elem_type) ||
2982 btf_type_is_resolve_source_only(elem_type)) {
2983 btf_verifier_log_type(env, v->t,
2988 if (!env_type_is_resolve_sink(env, elem_type) &&
2989 !env_type_is_resolved(env, elem_type_id))
2990 return env_stack_push(env, elem_type, elem_type_id);
2992 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2994 btf_verifier_log_type(env, v->t, "Invalid elem");
2998 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2999 btf_verifier_log_type(env, v->t, "Invalid array of int");
3003 if (array->nelems && elem_size > U32_MAX / array->nelems) {
3004 btf_verifier_log_type(env, v->t,
3005 "Array size overflows U32_MAX");
3009 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3014 static void btf_array_log(struct btf_verifier_env *env,
3015 const struct btf_type *t)
3017 const struct btf_array *array = btf_type_array(t);
3019 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3020 array->type, array->index_type, array->nelems);
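/* e.g. "int a[10]" would appear in the verifier log as something like
 *   [N] ARRAY (anon) type_id=<elem id> index_type_id=<index id> nr_elems=10
 * (ids illustrative), following the log format described at the top of
 * this file.
 */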
3023 static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
3024 u32 type_id, void *data, u8 bits_offset,
3025 struct btf_show *show)
3027 const struct btf_array *array = btf_type_array(t);
3028 const struct btf_kind_operations *elem_ops;
3029 const struct btf_type *elem_type;
3030 u32 i, elem_size = 0, elem_type_id;
3033 elem_type_id = array->type;
3034 elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
3035 if (elem_type && btf_type_has_size(elem_type))
3036 elem_size = elem_type->size;
3038 if (elem_type && btf_type_is_int(elem_type)) {
3039 u32 int_type = btf_type_int(elem_type);
3041 encoding = BTF_INT_ENCODING(int_type);
3044 * BTF_INT_CHAR encoding never seems to be set for
3045 * char arrays, so if size is 1 and element is
3046 * printable as a char, we'll do that.
3049 encoding = BTF_INT_CHAR;
3052 if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3057 elem_ops = btf_type_ops(elem_type);
3059 for (i = 0; i < array->nelems; i++) {
3061 btf_show_start_array_member(show);
3063 elem_ops->show(btf, elem_type, elem_type_id, data,
3067 btf_show_end_array_member(show);
3069 if (show->state.array_terminated)
3073 btf_show_end_array_type(show);
3076 static void btf_array_show(const struct btf *btf, const struct btf_type *t,
3077 u32 type_id, void *data, u8 bits_offset,
3078 struct btf_show *show)
3080 const struct btf_member *m = show->state.member;
3083 * First check if any members would be shown (are non-zero).
3084 * See comments above "struct btf_show" definition for more
3085 * details on how this works at a high-level.
3087 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3088 if (!show->state.depth_check) {
3089 show->state.depth_check = show->state.depth + 1;
3090 show->state.depth_to_show = 0;
3092 __btf_array_show(btf, t, type_id, data, bits_offset, show);
3093 show->state.member = m;
3095 if (show->state.depth_check != show->state.depth + 1)
3097 show->state.depth_check = 0;
3099 if (show->state.depth_to_show <= show->state.depth)
3102 * Reaching here indicates we have recursed and found
3103 * non-zero array member(s).
3106 __btf_array_show(btf, t, type_id, data, bits_offset, show);
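/* Example of the two-pass scheme above: without BTF_SHOW_ZERO, dumping
 * "int a[4]" holding {0, 0, 3, 0} first walks the elements with
 * depth_check set, recording via depth_to_show that a non-zero element
 * exists; the second pass then actually emits the array. An all-zero
 * array is skipped entirely.
 */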
3109 static const struct btf_kind_operations array_ops = {
3110 .check_meta = btf_array_check_meta,
3111 .resolve = btf_array_resolve,
3112 .check_member = btf_array_check_member,
3113 .check_kflag_member = btf_generic_check_kflag_member,
3114 .log_details = btf_array_log,
3115 .show = btf_array_show,
3118 static int btf_struct_check_member(struct btf_verifier_env *env,
3119 const struct btf_type *struct_type,
3120 const struct btf_member *member,
3121 const struct btf_type *member_type)
3123 u32 struct_bits_off = member->offset;
3124 u32 struct_size, bytes_offset;
3126 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3127 btf_verifier_log_member(env, struct_type, member,
3128 "Member is not byte aligned");
3132 struct_size = struct_type->size;
3133 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3134 if (struct_size - bytes_offset < member_type->size) {
3135 btf_verifier_log_member(env, struct_type, member,
3136 "Member exceeds struct_size");
3143 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3144 const struct btf_type *t,
3147 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3148 const struct btf_member *member;
3149 u32 meta_needed, last_offset;
3150 struct btf *btf = env->btf;
3151 u32 struct_size = t->size;
3155 meta_needed = btf_type_vlen(t) * sizeof(*member);
3156 if (meta_left < meta_needed) {
3157 btf_verifier_log_basic(env, t,
3158 "meta_left:%u meta_needed:%u",
3159 meta_left, meta_needed);
3163 /* struct type either no name or a valid one */
3165 !btf_name_valid_identifier(env->btf, t->name_off)) {
3166 btf_verifier_log_type(env, t, "Invalid name");
3170 btf_verifier_log_type(env, t, NULL);
3173 for_each_member(i, t, member) {
3174 if (!btf_name_offset_valid(btf, member->name_off)) {
3175 btf_verifier_log_member(env, t, member,
3176 "Invalid member name_offset:%u",
3181 /* struct member either no name or a valid one */
3182 if (member->name_off &&
3183 !btf_name_valid_identifier(btf, member->name_off)) {
3184 btf_verifier_log_member(env, t, member, "Invalid name");
3187 /* A member cannot be in type void */
3188 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3189 btf_verifier_log_member(env, t, member,
3194 offset = __btf_member_bit_offset(t, member);
3195 if (is_union && offset) {
3196 btf_verifier_log_member(env, t, member,
3197 "Invalid member bits_offset");
3202 * ">" instead of ">=" because the last member could be zero-sized
 * (e.g. a flexible array member), making equal offsets legal. */
3205 if (last_offset > offset) {
3206 btf_verifier_log_member(env, t, member,
3207 "Invalid member bits_offset");
3211 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3212 btf_verifier_log_member(env, t, member,
3213 "Member bits_offset exceeds its struct size");
3217 btf_verifier_log_member(env, t, member, NULL);
3218 last_offset = offset;
3224 static int btf_struct_resolve(struct btf_verifier_env *env,
3225 const struct resolve_vertex *v)
3227 const struct btf_member *member;
3231 /* Before continuing to resolve the next_member,
3232 * ensure the last member is indeed resolved to a
3233 * type with size info.
3235 if (v->next_member) {
3236 const struct btf_type *last_member_type;
3237 const struct btf_member *last_member;
3238 u32 last_member_type_id;
3240 last_member = btf_type_member(v->t) + v->next_member - 1;
3241 last_member_type_id = last_member->type;
3242 if (WARN_ON_ONCE(!env_type_is_resolved(env,
3243 last_member_type_id)))
3246 last_member_type = btf_type_by_id(env->btf,
3247 last_member_type_id);
3248 if (btf_type_kflag(v->t))
3249 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3253 err = btf_type_ops(last_member_type)->check_member(env, v->t,
3260 for_each_member_from(i, v->next_member, v->t, member) {
3261 u32 member_type_id = member->type;
3262 const struct btf_type *member_type = btf_type_by_id(env->btf,
3265 if (btf_type_nosize_or_null(member_type) ||
3266 btf_type_is_resolve_source_only(member_type)) {
3267 btf_verifier_log_member(env, v->t, member,
3272 if (!env_type_is_resolve_sink(env, member_type) &&
3273 !env_type_is_resolved(env, member_type_id)) {
3274 env_stack_set_next_member(env, i + 1);
3275 return env_stack_push(env, member_type, member_type_id);
3278 if (btf_type_kflag(v->t))
3279 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3283 err = btf_type_ops(member_type)->check_member(env, v->t,
3290 env_stack_pop_resolved(env, 0, 0);
3295 static void btf_struct_log(struct btf_verifier_env *env,
3296 const struct btf_type *t)
3298 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3302 BTF_FIELD_IGNORE = 0,
3303 BTF_FIELD_FOUND = 1,
3306 struct btf_field_info {
3307 enum btf_field_type type;
3314 const char *node_name;
3320 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3321 u32 off, int sz, enum btf_field_type field_type,
3322 struct btf_field_info *info)
3324 if (!__btf_type_is_struct(t))
3325 return BTF_FIELD_IGNORE;
3327 return BTF_FIELD_IGNORE;
3328 info->type = field_type;
3330 return BTF_FIELD_FOUND;
3333 static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3334 u32 off, int sz, struct btf_field_info *info, u32 field_mask)
3336 enum btf_field_type type;
3337 const char *tag_value;
3341 /* Permit modifiers on the pointer itself */
3342 if (btf_type_is_volatile(t))
3343 t = btf_type_by_id(btf, t->type);
3344 /* For PTR, sz is always == 8 */
3345 if (!btf_type_is_ptr(t))
3346 return BTF_FIELD_IGNORE;
3347 t = btf_type_by_id(btf, t->type);
3348 is_type_tag = btf_type_is_type_tag(t) && !btf_type_kflag(t);
3350 return BTF_FIELD_IGNORE;
3351 /* Reject extra tags */
3352 if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3354 tag_value = __btf_name_by_offset(btf, t->name_off);
3355 if (!strcmp("kptr_untrusted", tag_value))
3356 type = BPF_KPTR_UNREF;
3357 else if (!strcmp("kptr", tag_value))
3358 type = BPF_KPTR_REF;
3359 else if (!strcmp("percpu_kptr", tag_value))
3360 type = BPF_KPTR_PERCPU;
3361 else if (!strcmp("uptr", tag_value))
3366 if (!(type & field_mask))
3367 return BTF_FIELD_IGNORE;
3369 /* Get the base type */
3370 t = btf_type_skip_modifiers(btf, t->type, &res_id);
3371 /* Only pointer to struct is allowed */
3372 if (!__btf_type_is_struct(t))
3377 info->kptr.type_id = res_id;
3378 return BTF_FIELD_FOUND;
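/* A hedged sketch of how the tags matched above look from the BPF
 * program side, using the macros from tools/lib/bpf/bpf_helpers.h
 * ("struct foo" is hypothetical):
 *
 *   #define __kptr           __attribute__((btf_type_tag("kptr")))
 *   #define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
 *
 *   struct map_value {
 *           struct foo __kptr           *ref;    // BPF_KPTR_REF
 *           struct foo __kptr_untrusted *unref;  // BPF_KPTR_UNREF
 *   };
 */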
3381 int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
3382 int comp_idx, const char *tag_key, int last_id)
3384 int len = strlen(tag_key);
3387 for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
3388 const struct btf_type *t = btf_type_by_id(btf, i);
3390 if (!btf_type_is_decl_tag(t))
3392 if (pt != btf_type_by_id(btf, t->type))
3394 if (btf_type_decl_tag(t)->component_idx != comp_idx)
3396 if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3403 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
3404 int comp_idx, const char *tag_key)
3406 const char *value = NULL;
3407 const struct btf_type *t;
3410 id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
3414 t = btf_type_by_id(btf, id);
3415 len = strlen(tag_key);
3416 value = __btf_name_by_offset(btf, t->name_off) + len;
3418 /* Prevent duplicate entries for same type */
3419 id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
3421 return ERR_PTR(-EEXIST);
3427 btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
3428 const struct btf_type *t, int comp_idx, u32 off,
3429 int sz, struct btf_field_info *info,
3430 enum btf_field_type head_type)
3432 const char *node_field_name;
3433 const char *value_type;
3436 if (!__btf_type_is_struct(t))
3437 return BTF_FIELD_IGNORE;
3439 return BTF_FIELD_IGNORE;
3440 value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3441 if (IS_ERR(value_type))
3443 node_field_name = strstr(value_type, ":");
3444 if (!node_field_name)
3446 value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
3449 id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3454 if (str_is_empty(node_field_name))
3456 info->type = head_type;
3458 info->graph_root.value_btf_id = id;
3459 info->graph_root.node_name = node_field_name;
3460 return BTF_FIELD_FOUND;
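/* A hedged sketch of the decl tag parsed above, as emitted by the
 * __contains() macro from the selftests' bpf_experimental.h (type and
 * member names are hypothetical):
 *
 *   struct elem {
 *           struct bpf_list_node node;
 *   };
 *   struct bpf_list_head head __contains(elem, node);
 *
 * __contains(name, node) expands to
 *   __attribute__((btf_decl_tag("contains:" #name ":" #node)))
 * which is why the code above looks up the "contains:" tag and splits
 * the value type name from the node field name at the ':'.
 */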
3463 #define field_mask_test_name(field_type, field_type_str) \
3464 if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3465 type = field_type; \
3469 static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
3470 u32 field_mask, u32 *seen_mask,
3471 int *align, int *sz)
3474 const char *name = __btf_name_by_offset(btf, var_type->name_off);
3476 if (field_mask & BPF_SPIN_LOCK) {
3477 if (!strcmp(name, "bpf_spin_lock")) {
3478 if (*seen_mask & BPF_SPIN_LOCK)
3480 *seen_mask |= BPF_SPIN_LOCK;
3481 type = BPF_SPIN_LOCK;
3485 if (field_mask & BPF_RES_SPIN_LOCK) {
3486 if (!strcmp(name, "bpf_res_spin_lock")) {
3487 if (*seen_mask & BPF_RES_SPIN_LOCK)
3489 *seen_mask |= BPF_RES_SPIN_LOCK;
3490 type = BPF_RES_SPIN_LOCK;
3494 if (field_mask & BPF_TIMER) {
3495 if (!strcmp(name, "bpf_timer")) {
3496 if (*seen_mask & BPF_TIMER)
3498 *seen_mask |= BPF_TIMER;
3503 if (field_mask & BPF_WORKQUEUE) {
3504 if (!strcmp(name, "bpf_wq")) {
3505 if (*seen_mask & BPF_WORKQUEUE)
3507 *seen_mask |= BPF_WORKQUEUE;
3508 type = BPF_WORKQUEUE;
3512 field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
3513 field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
3514 field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
3515 field_mask_test_name(BPF_RB_NODE, "bpf_rb_node");
3516 field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
3518 /* Only return BPF_KPTR when all other types with matchable names fail */
3519 if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3520 type = BPF_KPTR_REF;
3525 *sz = btf_field_type_size(type);
3526 *align = btf_field_type_align(type);
3530 #undef field_mask_test_name
3532 /* Repeat a number of fields a specified number of times.
3534 * Copy the fields starting from the first field and repeat them
3535 * repeat_cnt times. Each copied field's offset is shifted by
3537 * (i + 1) * elem_size,
3538 * where i is the repeat index and elem_size is the size of an element. */
3540 static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
3541 u32 field_cnt, u32 repeat_cnt, u32 elem_size)
3546 /* Ensure not repeating fields that should not be repeated. */
3547 for (i = 0; i < field_cnt; i++) {
3548 switch (info[i].type) {
3549 case BPF_KPTR_UNREF:
3551 case BPF_KPTR_PERCPU:
3561 /* Struct sizes and variable sizes are both u32,
3562 * so the multiplication cannot overflow. */
3564 if (field_cnt * (repeat_cnt + 1) > info_cnt)
3568 for (i = 0; i < repeat_cnt; i++) {
3569 memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
3570 for (j = 0; j < field_cnt; j++)
3571 info[cur++].off += (i + 1) * elem_size;
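/* Worked example (illustrative): for a map value containing
 *   struct { struct bpf_timer t; } arr[3];
 * one field is found at offset 0 with elem_size == sizeof(arr[0]), and
 * btf_repeat_fields(info, info_cnt, 1, 2, elem_size) expands it into
 * three entries at offsets 0, elem_size and 2 * elem_size.
 */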
3577 static int btf_find_struct_field(const struct btf *btf,
3578 const struct btf_type *t, u32 field_mask,
3579 struct btf_field_info *info, int info_cnt,
3582 /* Find special fields in the struct type of a field.
3584 * This function finds fields of special types that are not a
3585 * global variable or a direct field of a struct type. It also handles
3586 * the repetition when the struct is the element type of an array. */
3588 static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
3589 u32 off, u32 nelems,
3590 u32 field_mask, struct btf_field_info *info,
3591 int info_cnt, u32 level)
3596 if (level >= MAX_RESOLVE_DEPTH)
3599 ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
3604 /* Shift the offsets of the nested struct fields to the offsets
3605 * related to the container.
3607 for (i = 0; i < ret; i++)
3611 err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3621 static int btf_find_field_one(const struct btf *btf,
3622 const struct btf_type *var,
3623 const struct btf_type *var_type,
3625 u32 off, u32 expected_size,
3626 u32 field_mask, u32 *seen_mask,
3627 struct btf_field_info *info, int info_cnt,
3630 int ret, align, sz, field_type;
3631 struct btf_field_info tmp;
3632 const struct btf_array *array;
3635 /* Walk into array types to find the element type and the number of
3636 * elements in the (flattened) array.
3638 for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
3639 array = btf_array(var_type);
3640 nelems *= array->nelems;
3641 var_type = btf_type_by_id(btf, array->type);
3643 if (i == MAX_RESOLVE_DEPTH)
3648 field_type = btf_get_field_type(btf, var_type,
3649 field_mask, seen_mask, &align, &sz);
3650 /* Look into variables of struct types */
3651 if (!field_type && __btf_type_is_struct(var_type)) {
3652 sz = var_type->size;
3653 if (expected_size && expected_size != sz * nelems)
3655 ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
3656 &info[0], info_cnt, level);
3660 if (field_type == 0)
3665 if (expected_size && expected_size != sz * nelems)
3670 switch (field_type) {
3672 case BPF_RES_SPIN_LOCK:
3678 ret = btf_find_struct(btf, var_type, off, sz, field_type,
3679 info_cnt ? &info[0] : &tmp);
3683 case BPF_KPTR_UNREF:
3685 case BPF_KPTR_PERCPU:
3687 ret = btf_find_kptr(btf, var_type, off, sz,
3688 info_cnt ? &info[0] : &tmp, field_mask);
3694 ret = btf_find_graph_root(btf, var, var_type,
3696 info_cnt ? &info[0] : &tmp,
3705 if (ret == BTF_FIELD_IGNORE)
3710 ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3717 static int btf_find_struct_field(const struct btf *btf,
3718 const struct btf_type *t, u32 field_mask,
3719 struct btf_field_info *info, int info_cnt,
3723 const struct btf_member *member;
3724 u32 i, off, seen_mask = 0;
3726 for_each_member(i, t, member) {
3727 const struct btf_type *member_type = btf_type_by_id(btf,
3730 off = __btf_member_bit_offset(t, member);
3732 /* valid C code cannot generate such BTF */
3736 ret = btf_find_field_one(btf, t, member_type, i,
3738 field_mask, &seen_mask,
3739 &info[idx], info_cnt - idx, level);
3747 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3748 u32 field_mask, struct btf_field_info *info,
3749 int info_cnt, u32 level)
3752 const struct btf_var_secinfo *vsi;
3753 u32 i, off, seen_mask = 0;
3755 for_each_vsi(i, t, vsi) {
3756 const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3757 const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3760 ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3761 field_mask, &seen_mask,
3762 &info[idx], info_cnt - idx,
3771 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3772 u32 field_mask, struct btf_field_info *info,
3775 if (__btf_type_is_struct(t))
3776 return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
3777 else if (btf_type_is_datasec(t))
3778 return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3782 /* Callers have to ensure the life cycle of btf if it is program BTF */
3783 static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3784 struct btf_field_info *info)
3786 struct module *mod = NULL;
3787 const struct btf_type *t;
3788 /* If a matching btf type is found in kernel or module BTFs, kptr_btf
3789 * is that BTF; otherwise it's the program BTF. */
3791 struct btf *kptr_btf;
3795 /* Find type in map BTF, and use it to look up the matching type
3796 * in vmlinux or module BTFs, by name and kind.
3798 t = btf_type_by_id(btf, info->kptr.type_id);
3799 id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3801 if (id == -ENOENT) {
3802 /* btf_parse_kptr should only be called w/ btf = program BTF */
3803 WARN_ON_ONCE(btf_is_kernel(btf));
3805 /* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3806 * kptr allocated via bpf_obj_new
3808 field->kptr.dtor = NULL;
3809 id = info->kptr.type_id;
3810 kptr_btf = (struct btf *)btf;
3816 /* Find and stash the function pointer for the destruction function that
3817 * needs to be eventually invoked from the map free path.
3819 if (info->type == BPF_KPTR_REF) {
3820 const struct btf_type *dtor_func;
3821 const char *dtor_func_name;
3825 /* This call also serves as a whitelist of allowed objects that
3826 * can be used as a referenced pointer and be stored in a map at the same time. */
3829 dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3830 if (dtor_btf_id < 0) {
3835 dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
3841 if (btf_is_module(kptr_btf)) {
3842 mod = btf_try_get_module(kptr_btf);
3849 /* We already verified dtor_func to be btf_type_is_func
3850 * in register_btf_id_dtor_kfuncs.
3852 dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3853 addr = kallsyms_lookup_name(dtor_func_name);
3858 field->kptr.dtor = (void *)addr;
3862 field->kptr.btf_id = id;
3863 field->kptr.btf = kptr_btf;
3864 field->kptr.module = mod;
3873 static int btf_parse_graph_root(const struct btf *btf,
3874 struct btf_field *field,
3875 struct btf_field_info *info,
3876 const char *node_type_name,
3877 size_t node_type_align)
3879 const struct btf_type *t, *n = NULL;
3880 const struct btf_member *member;
3884 t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3885 /* We've already checked that value_btf_id is a struct type. We
3886 * just need to figure out the offset of the list_node and verify its type. */
3889 for_each_member(i, t, member) {
3890 if (strcmp(info->graph_root.node_name,
3891 __btf_name_by_offset(btf, member->name_off)))
3893 /* Invalid BTF, two members with same name */
3896 n = btf_type_by_id(btf, member->type);
3897 if (!__btf_type_is_struct(n))
3899 if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
3901 offset = __btf_member_bit_offset(n, member);
3905 if (offset % node_type_align)
3908 field->graph_root.btf = (struct btf *)btf;
3909 field->graph_root.value_btf_id = info->graph_root.value_btf_id;
3910 field->graph_root.node_offset = offset;
3917 static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
3918 struct btf_field_info *info)
3920 return btf_parse_graph_root(btf, field, info, "bpf_list_node",
3921 __alignof__(struct bpf_list_node));
3924 static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
3925 struct btf_field_info *info)
3927 return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
3928 __alignof__(struct bpf_rb_node));
3931 static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
3933 const struct btf_field *a = (const struct btf_field *)_a;
3934 const struct btf_field *b = (const struct btf_field *)_b;
3936 if (a->offset < b->offset)
3938 else if (a->offset > b->offset)
3943 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
3944 u32 field_mask, u32 value_size)
3946 struct btf_field_info info_arr[BTF_FIELDS_MAX];
3947 u32 next_off = 0, field_type_size;
3948 struct btf_record *rec;
3951 ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
3953 return ERR_PTR(ret);
3958 /* This needs to be kzalloc to zero out padding and unused fields, see
3959 * comment in btf_record_equal.
3961 rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL | __GFP_NOWARN);
3963 return ERR_PTR(-ENOMEM);
3965 rec->spin_lock_off = -EINVAL;
3966 rec->res_spin_lock_off = -EINVAL;
3967 rec->timer_off = -EINVAL;
3968 rec->wq_off = -EINVAL;
3969 rec->refcount_off = -EINVAL;
3970 for (i = 0; i < cnt; i++) {
3971 field_type_size = btf_field_type_size(info_arr[i].type);
3972 if (info_arr[i].off + field_type_size > value_size) {
3973 WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
3977 if (info_arr[i].off < next_off) {
3981 next_off = info_arr[i].off + field_type_size;
3983 rec->field_mask |= info_arr[i].type;
3984 rec->fields[i].offset = info_arr[i].off;
3985 rec->fields[i].type = info_arr[i].type;
3986 rec->fields[i].size = field_type_size;
3988 switch (info_arr[i].type) {
3990 WARN_ON_ONCE(rec->spin_lock_off >= 0);
3991 /* Cache offset for faster lookup at runtime */
3992 rec->spin_lock_off = rec->fields[i].offset;
3994 case BPF_RES_SPIN_LOCK:
3995 WARN_ON_ONCE(rec->spin_lock_off >= 0);
3996 /* Cache offset for faster lookup at runtime */
3997 rec->res_spin_lock_off = rec->fields[i].offset;
4000 WARN_ON_ONCE(rec->timer_off >= 0);
4001 /* Cache offset for faster lookup at runtime */
4002 rec->timer_off = rec->fields[i].offset;
4005 WARN_ON_ONCE(rec->wq_off >= 0);
4006 /* Cache offset for faster lookup at runtime */
4007 rec->wq_off = rec->fields[i].offset;
4010 WARN_ON_ONCE(rec->refcount_off >= 0);
4011 /* Cache offset for faster lookup at runtime */
4012 rec->refcount_off = rec->fields[i].offset;
4014 case BPF_KPTR_UNREF:
4016 case BPF_KPTR_PERCPU:
4018 ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4023 ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4028 ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4042 if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4047 /* bpf_{list_head, rb_root} require bpf_spin_lock */
4048 if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
4049 btf_record_has_field(rec, BPF_RB_ROOT)) &&
4050 (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4055 if (rec->refcount_off < 0 &&
4056 btf_record_has_field(rec, BPF_LIST_NODE) &&
4057 btf_record_has_field(rec, BPF_RB_NODE)) {
4062 sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4067 btf_record_free(rec);
4068 return ERR_PTR(ret);
4071 int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
4075 /* There are three types that signify ownership of some other type:
4076 * kptr_ref, bpf_list_head, bpf_rb_root.
4077 * kptr_ref only supports storing kernel types, which can't store
4078 * references to program allocated local types.
4080 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
4081 * does not form cycles.
4083 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4085 for (i = 0; i < rec->cnt; i++) {
4086 struct btf_struct_meta *meta;
4087 const struct btf_type *t;
4090 if (rec->fields[i].type == BPF_UPTR) {
4091 /* The uptr only supports pinning one page and cannot
4092 * point to a kernel struct
4094 if (btf_is_kernel(rec->fields[i].kptr.btf))
4096 t = btf_type_by_id(rec->fields[i].kptr.btf,
4097 rec->fields[i].kptr.btf_id);
4100 if (t->size > PAGE_SIZE)
4105 if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4107 btf_id = rec->fields[i].graph_root.value_btf_id;
4108 meta = btf_find_struct_meta(btf, btf_id);
4111 rec->fields[i].graph_root.value_rec = meta->record;
4113 /* We need to set value_rec for all root types, but no need
4114 * to check the ownership cycle for a type unless it's also a node type. */
4117 if (!(rec->field_mask & BPF_GRAPH_NODE))
4120 /* We need to ensure ownership acyclicity among all types. The
4121 * proper way to do it would be to topologically sort all BTF
4122 * IDs based on the ownership edges, since there can be multiple
4123 * bpf_{list_head,rb_node} in a type. Instead, we use the
4124 * following reasoning:
4126 * - A type can only be owned by another type in user BTF if it
4127 * has a bpf_{list,rb}_node. Let's call these node types.
4128 * - A type can only _own_ another type in user BTF if it has a
4129 * bpf_{list_head,rb_root}. Let's call these root types.
4131 * We ensure that if a type is both a root and node, its
4132 * element types cannot be root types.
4134 * To ensure acyclicity:
4136 * When A is a root type but not a node, its ownership
4140 * - A is a root, e.g. has bpf_rb_root.
4141 * - B is both a root and node, e.g. has bpf_rb_node and
4143 * - C is only a node, e.g. has bpf_list_node.
4145 * When A is both a root and node, some other type already
4146 * owns it in the BTF domain, hence it cannot own
4147 * another root type through any of the ownership edges.
4150 * - A is both a root and node.
4151 * - B is only a node. */
4153 if (meta->record->field_mask & BPF_GRAPH_ROOT)
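/* A hedged sketch of the A -> B -> C chain described above, written as
 * the structs a BPF program might declare (names are hypothetical;
 * __contains() comes from the selftests' bpf_experimental.h):
 *
 *   struct c_node {                      // C: only a node
 *           struct bpf_list_node node;
 *   };
 *   struct b_mid {                       // B: both root and node
 *           struct bpf_rb_node rnode;
 *           struct bpf_list_head head __contains(c_node, node);
 *   };
 *   struct a_root {                      // A: only a root
 *           struct bpf_spin_lock lock;
 *           struct bpf_rb_root root __contains(b_mid, rnode);
 *   };
 */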
4159 static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
4160 u32 type_id, void *data, u8 bits_offset,
4161 struct btf_show *show)
4163 const struct btf_member *member;
4167 safe_data = btf_show_start_struct_type(show, t, type_id, data);
4171 for_each_member(i, t, member) {
4172 const struct btf_type *member_type = btf_type_by_id(btf,
4174 const struct btf_kind_operations *ops;
4175 u32 member_offset, bitfield_size;
4179 btf_show_start_member(show, member);
4181 member_offset = __btf_member_bit_offset(t, member);
4182 bitfield_size = __btf_member_bitfield_size(t, member);
4183 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
4184 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
4185 if (bitfield_size) {
4186 safe_data = btf_show_start_type(show, member_type,
4188 data + bytes_offset);
4190 btf_bitfield_show(safe_data,
4192 bitfield_size, show);
4193 btf_show_end_type(show);
4195 ops = btf_type_ops(member_type);
4196 ops->show(btf, member_type, member->type,
4197 data + bytes_offset, bits8_offset, show);
4200 btf_show_end_member(show);
4203 btf_show_end_struct_type(show);
4206 static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
4207 u32 type_id, void *data, u8 bits_offset,
4208 struct btf_show *show)
4210 const struct btf_member *m = show->state.member;
4213 * First check if any members would be shown (are non-zero).
4214 * See comments above "struct btf_show" definition for more
4215 * details on how this works at a high-level.
4217 if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4218 if (!show->state.depth_check) {
4219 show->state.depth_check = show->state.depth + 1;
4220 show->state.depth_to_show = 0;
4222 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
4223 /* Restore saved member data here */
4224 show->state.member = m;
4225 if (show->state.depth_check != show->state.depth + 1)
4227 show->state.depth_check = 0;
4229 if (show->state.depth_to_show <= show->state.depth)
4232 * Reaching here indicates we have recursed and found
4233 * non-zero child values.
4237 __btf_struct_show(btf, t, type_id, data, bits_offset, show);
4240 static const struct btf_kind_operations struct_ops = {
4241 .check_meta = btf_struct_check_meta,
4242 .resolve = btf_struct_resolve,
4243 .check_member = btf_struct_check_member,
4244 .check_kflag_member = btf_generic_check_kflag_member,
4245 .log_details = btf_struct_log,
4246 .show = btf_struct_show,
4249 static int btf_enum_check_member(struct btf_verifier_env *env,
4250 const struct btf_type *struct_type,
4251 const struct btf_member *member,
4252 const struct btf_type *member_type)
4254 u32 struct_bits_off = member->offset;
4255 u32 struct_size, bytes_offset;
4257 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4258 btf_verifier_log_member(env, struct_type, member,
4259 "Member is not byte aligned");
4263 struct_size = struct_type->size;
4264 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4265 if (struct_size - bytes_offset < member_type->size) {
4266 btf_verifier_log_member(env, struct_type, member,
4267 "Member exceeds struct_size");
4274 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
4275 const struct btf_type *struct_type,
4276 const struct btf_member *member,
4277 const struct btf_type *member_type)
4279 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
4280 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
4282 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4283 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4285 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4286 btf_verifier_log_member(env, struct_type, member,
4287 "Member is not byte aligned");
4291 nr_bits = int_bitsize;
4292 } else if (nr_bits > int_bitsize) {
4293 btf_verifier_log_member(env, struct_type, member,
4294 "Invalid member bitfield_size");
4298 struct_size = struct_type->size;
4299 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
4300 if (struct_size < bytes_end) {
4301 btf_verifier_log_member(env, struct_type, member,
4302 "Member exceeds struct_size");
4309 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
4310 const struct btf_type *t,
4313 const struct btf_enum *enums = btf_type_enum(t);
4314 struct btf *btf = env->btf;
4315 const char *fmt_str;
4319 nr_enums = btf_type_vlen(t);
4320 meta_needed = nr_enums * sizeof(*enums);
4322 if (meta_left < meta_needed) {
4323 btf_verifier_log_basic(env, t,
4324 "meta_left:%u meta_needed:%u",
4325 meta_left, meta_needed);
4329 if (t->size > 8 || !is_power_of_2(t->size)) {
4330 btf_verifier_log_type(env, t, "Unexpected size");
4334 /* enum type either no name or a valid one */
4336 !btf_name_valid_identifier(env->btf, t->name_off)) {
4337 btf_verifier_log_type(env, t, "Invalid name");
4341 btf_verifier_log_type(env, t, NULL);
4343 for (i = 0; i < nr_enums; i++) {
4344 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4345 btf_verifier_log(env, "\tInvalid name_offset:%u",
4350 /* enum member must have a valid name */
4351 if (!enums[i].name_off ||
4352 !btf_name_valid_identifier(btf, enums[i].name_off)) {
4353 btf_verifier_log_type(env, t, "Invalid name");
4357 if (env->log.level == BPF_LOG_KERNEL)
4359 fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4360 btf_verifier_log(env, fmt_str,
4361 __btf_name_by_offset(btf, enums[i].name_off),
4368 static void btf_enum_log(struct btf_verifier_env *env,
4369 const struct btf_type *t)
4371 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4374 static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4375 u32 type_id, void *data, u8 bits_offset,
4376 struct btf_show *show)
4378 const struct btf_enum *enums = btf_type_enum(t);
4379 u32 i, nr_enums = btf_type_vlen(t);
4383 safe_data = btf_show_start_type(show, t, type_id, data);
4387 v = *(int *)safe_data;
4389 for (i = 0; i < nr_enums; i++) {
4390 if (v != enums[i].val)
4393 btf_show_type_value(show, "%s",
4394 __btf_name_by_offset(btf,
4395 enums[i].name_off));
4397 btf_show_end_type(show);
4401 if (btf_type_kflag(t))
4402 btf_show_type_value(show, "%d", v);
4404 btf_show_type_value(show, "%u", v);
4405 btf_show_end_type(show);
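/* e.g. for "enum { RED = 1, GREEN = 2 }" a value of 2 prints "GREEN",
 * while an unmatched value such as 5 falls through to the numeric path
 * above ("%d" for signed enums, i.e. kflag set, "%u" otherwise).
 */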
4408 static const struct btf_kind_operations enum_ops = {
4409 .check_meta = btf_enum_check_meta,
4410 .resolve = btf_df_resolve,
4411 .check_member = btf_enum_check_member,
4412 .check_kflag_member = btf_enum_check_kflag_member,
4413 .log_details = btf_enum_log,
4414 .show = btf_enum_show,
4417 static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4418 const struct btf_type *t,
4421 const struct btf_enum64 *enums = btf_type_enum64(t);
4422 struct btf *btf = env->btf;
4423 const char *fmt_str;
4427 nr_enums = btf_type_vlen(t);
4428 meta_needed = nr_enums * sizeof(*enums);
4430 if (meta_left < meta_needed) {
4431 btf_verifier_log_basic(env, t,
4432 "meta_left:%u meta_needed:%u",
4433 meta_left, meta_needed);
4437 if (t->size > 8 || !is_power_of_2(t->size)) {
4438 btf_verifier_log_type(env, t, "Unexpected size");
4442 /* enum type either no name or a valid one */
4444 !btf_name_valid_identifier(env->btf, t->name_off)) {
4445 btf_verifier_log_type(env, t, "Invalid name");
4449 btf_verifier_log_type(env, t, NULL);
4451 for (i = 0; i < nr_enums; i++) {
4452 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4453 btf_verifier_log(env, "\tInvalid name_offset:%u",
4458 /* enum member must have a valid name */
4459 if (!enums[i].name_off ||
4460 !btf_name_valid_identifier(btf, enums[i].name_off)) {
4461 btf_verifier_log_type(env, t, "Invalid name");
4465 if (env->log.level == BPF_LOG_KERNEL)
4468 fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4469 btf_verifier_log(env, fmt_str,
4470 __btf_name_by_offset(btf, enums[i].name_off),
4471 btf_enum64_value(enums + i));
4477 static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4478 u32 type_id, void *data, u8 bits_offset,
4479 struct btf_show *show)
4481 const struct btf_enum64 *enums = btf_type_enum64(t);
4482 u32 i, nr_enums = btf_type_vlen(t);
4486 safe_data = btf_show_start_type(show, t, type_id, data);
4490 v = *(u64 *)safe_data;
4492 for (i = 0; i < nr_enums; i++) {
4493 if (v != btf_enum64_value(enums + i))
4496 btf_show_type_value(show, "%s",
4497 __btf_name_by_offset(btf,
4498 enums[i].name_off));
4500 btf_show_end_type(show);
4504 if (btf_type_kflag(t))
4505 btf_show_type_value(show, "%lld", v);
4507 btf_show_type_value(show, "%llu", v);
4508 btf_show_end_type(show);
4511 static const struct btf_kind_operations enum64_ops = {
4512 .check_meta = btf_enum64_check_meta,
4513 .resolve = btf_df_resolve,
4514 .check_member = btf_enum_check_member,
4515 .check_kflag_member = btf_enum_check_kflag_member,
4516 .log_details = btf_enum_log,
4517 .show = btf_enum64_show,
4520 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4521 const struct btf_type *t,
4524 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4526 if (meta_left < meta_needed) {
4527 btf_verifier_log_basic(env, t,
4528 "meta_left:%u meta_needed:%u",
4529 meta_left, meta_needed);
4534 btf_verifier_log_type(env, t, "Invalid name");
4538 if (btf_type_kflag(t)) {
4539 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4543 btf_verifier_log_type(env, t, NULL);
4548 static void btf_func_proto_log(struct btf_verifier_env *env,
4549 const struct btf_type *t)
4551 const struct btf_param *args = (const struct btf_param *)(t + 1);
4552 u16 nr_args = btf_type_vlen(t), i;
4554 btf_verifier_log(env, "return=%u args=(", t->type);
4556 btf_verifier_log(env, "void");
4560 if (nr_args == 1 && !args[0].type) {
4561 /* Only one vararg */
4562 btf_verifier_log(env, "vararg");
4566 btf_verifier_log(env, "%u %s", args[0].type,
4567 __btf_name_by_offset(env->btf,
4569 for (i = 1; i < nr_args - 1; i++)
4570 btf_verifier_log(env, ", %u %s", args[i].type,
4571 __btf_name_by_offset(env->btf,
4575 const struct btf_param *last_arg = &args[nr_args - 1];
4578 btf_verifier_log(env, ", %u %s", last_arg->type,
4579 __btf_name_by_offset(env->btf,
4580 last_arg->name_off));
4582 btf_verifier_log(env, ", vararg");
4586 btf_verifier_log(env, ")");
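/* Example (illustrative): a prototype like
 *   int (*fn)(struct sk_buff *skb, ...)
 * is logged by the code above as
 *   return=<type_id> args=(<type_id> skb, vararg)
 * and a parameterless prototype is logged as "args=(void)".
 */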
4589 static const struct btf_kind_operations func_proto_ops = {
4590 .check_meta = btf_func_proto_check_meta,
4591 .resolve = btf_df_resolve,
4593 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
4594 * a struct's member.
4596 * It should be a function pointer instead.
4597 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4599 * Hence, there is no btf_func_check_member().
4601 .check_member = btf_df_check_member,
4602 .check_kflag_member = btf_df_check_kflag_member,
4603 .log_details = btf_func_proto_log,
4604 .show = btf_df_show,
4607 static s32 btf_func_check_meta(struct btf_verifier_env *env,
4608 const struct btf_type *t,
4612 !btf_name_valid_identifier(env->btf, t->name_off)) {
4613 btf_verifier_log_type(env, t, "Invalid name");
4617 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4618 btf_verifier_log_type(env, t, "Invalid func linkage");
4622 if (btf_type_kflag(t)) {
4623 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4627 btf_verifier_log_type(env, t, NULL);
4632 static int btf_func_resolve(struct btf_verifier_env *env,
4633 const struct resolve_vertex *v)
4635 const struct btf_type *t = v->t;
4636 u32 next_type_id = t->type;
4639 err = btf_func_check(env, t);
4643 env_stack_pop_resolved(env, next_type_id, 0);
4647 static const struct btf_kind_operations func_ops = {
4648 .check_meta = btf_func_check_meta,
4649 .resolve = btf_func_resolve,
4650 .check_member = btf_df_check_member,
4651 .check_kflag_member = btf_df_check_kflag_member,
4652 .log_details = btf_ref_type_log,
4653 .show = btf_df_show,
4656 static s32 btf_var_check_meta(struct btf_verifier_env *env,
4657 const struct btf_type *t,
4660 const struct btf_var *var;
4661 u32 meta_needed = sizeof(*var);
4663 if (meta_left < meta_needed) {
4664 btf_verifier_log_basic(env, t,
4665 "meta_left:%u meta_needed:%u",
4666 meta_left, meta_needed);
4670 if (btf_type_vlen(t)) {
4671 btf_verifier_log_type(env, t, "vlen != 0");
4675 if (btf_type_kflag(t)) {
4676 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4681 !btf_name_valid_identifier(env->btf, t->name_off)) {
4682 btf_verifier_log_type(env, t, "Invalid name");
4686 /* A var cannot be in type void */
4687 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4688 btf_verifier_log_type(env, t, "Invalid type_id");
4692 var = btf_type_var(t);
4693 if (var->linkage != BTF_VAR_STATIC &&
4694 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4695 btf_verifier_log_type(env, t, "Linkage not supported");
4699 btf_verifier_log_type(env, t, NULL);
4704 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4706 const struct btf_var *var = btf_type_var(t);
4708 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
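/* Illustrative example (hypothetical type ids): for a global
 *
 *	int counter;
 *
 * the encoding, and the log line printed above, would look like
 *
 *	[1] INT 'int' size=4
 *	[2] VAR 'counter' type_id=1 linkage=1
 *
 * where linkage 1 is BTF_VAR_GLOBAL_ALLOCATED and 0 is BTF_VAR_STATIC,
 * the only two values btf_var_check_meta() accepts.
 */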
4711 static const struct btf_kind_operations var_ops = {
4712 .check_meta = btf_var_check_meta,
4713 .resolve = btf_var_resolve,
4714 .check_member = btf_df_check_member,
4715 .check_kflag_member = btf_df_check_kflag_member,
4716 .log_details = btf_var_log,
4717 .show = btf_var_show,
4720 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4721 const struct btf_type *t,
4724 const struct btf_var_secinfo *vsi;
4725 u64 last_vsi_end_off = 0, sum = 0;
4728 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4729 if (meta_left < meta_needed) {
4730 btf_verifier_log_basic(env, t,
4731 "meta_left:%u meta_needed:%u",
4732 meta_left, meta_needed);
4737 btf_verifier_log_type(env, t, "size == 0");
4741 if (btf_type_kflag(t)) {
4742 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4747 !btf_name_valid_section(env->btf, t->name_off)) {
4748 btf_verifier_log_type(env, t, "Invalid name");
4752 btf_verifier_log_type(env, t, NULL);
4754 for_each_vsi(i, t, vsi) {
4755 /* A var cannot be in type void */
4756 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4757 btf_verifier_log_vsi(env, t, vsi,
4762 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4763 btf_verifier_log_vsi(env, t, vsi,
4768 if (!vsi->size || vsi->size > t->size) {
4769 btf_verifier_log_vsi(env, t, vsi,
4774 last_vsi_end_off = vsi->offset + vsi->size;
4775 if (last_vsi_end_off > t->size) {
4776 btf_verifier_log_vsi(env, t, vsi,
4777 "Invalid offset+size");
4781 btf_verifier_log_vsi(env, t, vsi, NULL);
4785 if (t->size < sum) {
4786 btf_verifier_log_type(env, t, "Invalid btf_info size");
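/* Illustrative sketch (hypothetical ids and offsets): a ".bss" section
 * holding two ints could be encoded as
 *
 *	[1] INT 'int' size=4
 *	[2] VAR 'a' type_id=1 linkage=1
 *	[3] VAR 'b' type_id=1 linkage=1
 *	[4] DATASEC '.bss' size=8 vlen=2
 *		type_id=2 offset=0 size=4
 *		type_id=3 offset=4 size=4
 *
 * The loop above walks the btf_var_secinfo entries in order and rejects
 * overlap (offset < last_vsi_end_off), entries starting past the
 * section (offset >= t->size) and entries spilling over its end
 * (offset + size > t->size).
 */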
4793 static int btf_datasec_resolve(struct btf_verifier_env *env,
4794 const struct resolve_vertex *v)
4796 const struct btf_var_secinfo *vsi;
4797 struct btf *btf = env->btf;
4800 env->resolve_mode = RESOLVE_TBD;
4801 for_each_vsi_from(i, v->next_member, v->t, vsi) {
4802 u32 var_type_id = vsi->type, type_id, type_size = 0;
4803 const struct btf_type *var_type = btf_type_by_id(env->btf,
4805 if (!var_type || !btf_type_is_var(var_type)) {
4806 btf_verifier_log_vsi(env, v->t, vsi,
4807 "Not a VAR kind member");
4811 if (!env_type_is_resolve_sink(env, var_type) &&
4812 !env_type_is_resolved(env, var_type_id)) {
4813 env_stack_set_next_member(env, i + 1);
4814 return env_stack_push(env, var_type, var_type_id);
4817 type_id = var_type->type;
4818 if (!btf_type_id_size(btf, &type_id, &type_size)) {
4819 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4823 if (vsi->size < type_size) {
4824 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4829 env_stack_pop_resolved(env, 0, 0);
4833 static void btf_datasec_log(struct btf_verifier_env *env,
4834 const struct btf_type *t)
4836 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4839 static void btf_datasec_show(const struct btf *btf,
4840 const struct btf_type *t, u32 type_id,
4841 void *data, u8 bits_offset,
4842 struct btf_show *show)
4844 const struct btf_var_secinfo *vsi;
4845 const struct btf_type *var;
4848 if (!btf_show_start_type(show, t, type_id, data))
4851 btf_show_type_value(show, "section (\"%s\") = {",
4852 __btf_name_by_offset(btf, t->name_off));
4853 for_each_vsi(i, t, vsi) {
4854 var = btf_type_by_id(btf, vsi->type);
4856 btf_show(show, ",");
4857 btf_type_ops(var)->show(btf, var, vsi->type,
4858 data + vsi->offset, bits_offset, show);
4860 btf_show_end_type(show);
4863 static const struct btf_kind_operations datasec_ops = {
4864 .check_meta = btf_datasec_check_meta,
4865 .resolve = btf_datasec_resolve,
4866 .check_member = btf_df_check_member,
4867 .check_kflag_member = btf_df_check_kflag_member,
4868 .log_details = btf_datasec_log,
4869 .show = btf_datasec_show,
4872 static s32 btf_float_check_meta(struct btf_verifier_env *env,
4873 const struct btf_type *t,
4876 if (btf_type_vlen(t)) {
4877 btf_verifier_log_type(env, t, "vlen != 0");
4881 if (btf_type_kflag(t)) {
4882 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4886 if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
4888 btf_verifier_log_type(env, t, "Invalid type_size");
4892 btf_verifier_log_type(env, t, NULL);
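/* For orientation (illustrative; the exact C type per size is ABI
 * dependent), the accepted sizes correspond to:
 *
 *	2  -> _Float16 / __fp16
 *	4  -> float
 *	8  -> double
 *	12 -> long double on 32-bit x86
 *	16 -> long double / __float128 on 64-bit targets
 */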
4897 static int btf_float_check_member(struct btf_verifier_env *env,
4898 const struct btf_type *struct_type,
4899 const struct btf_member *member,
4900 const struct btf_type *member_type)
4902 u64 start_offset_bytes;
4903 u64 end_offset_bytes;
4908 /* Different architectures have different alignment requirements, so
4909 * here we check only for the reasonable minimum. This way we ensure
4910 * that types after CO-RE can pass the kernel BTF verifier.
4912 align_bytes = min_t(u64, sizeof(void *), member_type->size);
4913 align_bits = align_bytes * BITS_PER_BYTE;
4914 div64_u64_rem(member->offset, align_bits, &misalign_bits);
4915 if (misalign_bits) {
4916 btf_verifier_log_member(env, struct_type, member,
4917 "Member is not properly aligned");
4921 start_offset_bytes = member->offset / BITS_PER_BYTE;
4922 end_offset_bytes = start_offset_bytes + member_type->size;
4923 if (end_offset_bytes > struct_type->size) {
4924 btf_verifier_log_member(env, struct_type, member,
4925 "Member exceeds struct_size");
4932 static void btf_float_log(struct btf_verifier_env *env,
4933 const struct btf_type *t)
4935 btf_verifier_log(env, "size=%u", t->size);
4938 static const struct btf_kind_operations float_ops = {
4939 .check_meta = btf_float_check_meta,
4940 .resolve = btf_df_resolve,
4941 .check_member = btf_float_check_member,
4942 .check_kflag_member = btf_generic_check_kflag_member,
4943 .log_details = btf_float_log,
4944 .show = btf_df_show,
4947 static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
4948 const struct btf_type *t,
4951 const struct btf_decl_tag *tag;
4952 u32 meta_needed = sizeof(*tag);
4956 if (meta_left < meta_needed) {
4957 btf_verifier_log_basic(env, t,
4958 "meta_left:%u meta_needed:%u",
4959 meta_left, meta_needed);
4963 value = btf_name_by_offset(env->btf, t->name_off);
4964 if (!value || !value[0]) {
4965 btf_verifier_log_type(env, t, "Invalid value");
4969 if (btf_type_vlen(t)) {
4970 btf_verifier_log_type(env, t, "vlen != 0");
4974 component_idx = btf_type_decl_tag(t)->component_idx;
4975 if (component_idx < -1) {
4976 btf_verifier_log_type(env, t, "Invalid component_idx");
4980 btf_verifier_log_type(env, t, NULL);
4985 static int btf_decl_tag_resolve(struct btf_verifier_env *env,
4986 const struct resolve_vertex *v)
4988 const struct btf_type *next_type;
4989 const struct btf_type *t = v->t;
4990 u32 next_type_id = t->type;
4991 struct btf *btf = env->btf;
4995 next_type = btf_type_by_id(btf, next_type_id);
4996 if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
4997 btf_verifier_log_type(env, v->t, "Invalid type_id");
5001 if (!env_type_is_resolve_sink(env, next_type) &&
5002 !env_type_is_resolved(env, next_type_id))
5003 return env_stack_push(env, next_type, next_type_id);
5005 component_idx = btf_type_decl_tag(t)->component_idx;
5006 if (component_idx != -1) {
5007 if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
5008 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5012 if (btf_type_is_struct(next_type)) {
5013 vlen = btf_type_vlen(next_type);
5015 /* next_type should be a function */
5016 next_type = btf_type_by_id(btf, next_type->type);
5017 vlen = btf_type_vlen(next_type);
5020 if ((u32)component_idx >= vlen) {
5021 btf_verifier_log_type(env, v->t, "Invalid component_idx");
5026 env_stack_pop_resolved(env, next_type_id, 0);
5031 static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
5033 btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5034 btf_type_decl_tag(t)->component_idx);
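/* Illustrative example (hypothetical ids): for
 *
 *	struct s {
 *		int a;
 *		int b __attribute__((btf_decl_tag("k")));
 *	};
 *
 * the tag is a separate type that points back at its target:
 *
 *	[1] STRUCT 's' size=8 vlen=2
 *	[2] DECL_TAG 'k' type=1 component_idx=1
 *
 * component_idx selects member 'b'; -1 would mean the tag applies to
 * the struct itself. btf_decl_tag_resolve() above is what range-checks
 * component_idx against the target's vlen.
 */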
5037 static const struct btf_kind_operations decl_tag_ops = {
5038 .check_meta = btf_decl_tag_check_meta,
5039 .resolve = btf_decl_tag_resolve,
5040 .check_member = btf_df_check_member,
5041 .check_kflag_member = btf_df_check_kflag_member,
5042 .log_details = btf_decl_tag_log,
5043 .show = btf_df_show,
5046 static int btf_func_proto_check(struct btf_verifier_env *env,
5047 const struct btf_type *t)
5049 const struct btf_type *ret_type;
5050 const struct btf_param *args;
5051 const struct btf *btf;
5056 args = (const struct btf_param *)(t + 1);
5057 nr_args = btf_type_vlen(t);
5059 /* Check func return type which could be "void" (t->type == 0) */
5061 u32 ret_type_id = t->type;
5063 ret_type = btf_type_by_id(btf, ret_type_id);
5065 btf_verifier_log_type(env, t, "Invalid return type");
5069 if (btf_type_is_resolve_source_only(ret_type)) {
5070 btf_verifier_log_type(env, t, "Invalid return type");
5074 if (btf_type_needs_resolve(ret_type) &&
5075 !env_type_is_resolved(env, ret_type_id)) {
5076 err = btf_resolve(env, ret_type, ret_type_id);
5081 /* Ensure the return type is a type that has a size */
5082 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5083 btf_verifier_log_type(env, t, "Invalid return type");
5091 /* Last func arg type_id could be 0 if it is a vararg */
5092 if (!args[nr_args - 1].type) {
5093 if (args[nr_args - 1].name_off) {
5094 btf_verifier_log_type(env, t, "Invalid arg#%u",
5101 for (i = 0; i < nr_args; i++) {
5102 const struct btf_type *arg_type;
5105 arg_type_id = args[i].type;
5106 arg_type = btf_type_by_id(btf, arg_type_id);
5108 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5112 if (btf_type_is_resolve_source_only(arg_type)) {
5113 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5117 if (args[i].name_off &&
5118 (!btf_name_offset_valid(btf, args[i].name_off) ||
5119 !btf_name_valid_identifier(btf, args[i].name_off))) {
5120 btf_verifier_log_type(env, t,
5121 "Invalid arg#%u", i + 1);
5125 if (btf_type_needs_resolve(arg_type) &&
5126 !env_type_is_resolved(env, arg_type_id)) {
5127 err = btf_resolve(env, arg_type, arg_type_id);
5132 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5133 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
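/* Illustrative vararg encoding (hypothetical ids): for
 *
 *	int printk_like(const char *fmt, ...);
 *
 * the FUNC_PROTO carries a final btf_param with both fields zero:
 *
 *	args[0] = { .name_off = <"fmt">, .type = <const char * id> }
 *	args[1] = { .name_off = 0, .type = 0 }
 *
 * which is why the checks above accept type == 0 only for the last
 * argument, and only when its name_off is 0 as well.
 */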
5141 static int btf_func_check(struct btf_verifier_env *env,
5142 const struct btf_type *t)
5144 const struct btf_type *proto_type;
5145 const struct btf_param *args;
5146 const struct btf *btf;
5150 proto_type = btf_type_by_id(btf, t->type);
5152 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
5153 btf_verifier_log_type(env, t, "Invalid type_id");
5157 args = (const struct btf_param *)(proto_type + 1);
5158 nr_args = btf_type_vlen(proto_type);
5159 for (i = 0; i < nr_args; i++) {
5160 if (!args[i].name_off && args[i].type) {
5161 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5169 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
5170 [BTF_KIND_INT] = &int_ops,
5171 [BTF_KIND_PTR] = &ptr_ops,
5172 [BTF_KIND_ARRAY] = &array_ops,
5173 [BTF_KIND_STRUCT] = &struct_ops,
5174 [BTF_KIND_UNION] = &struct_ops,
5175 [BTF_KIND_ENUM] = &enum_ops,
5176 [BTF_KIND_FWD] = &fwd_ops,
5177 [BTF_KIND_TYPEDEF] = &modifier_ops,
5178 [BTF_KIND_VOLATILE] = &modifier_ops,
5179 [BTF_KIND_CONST] = &modifier_ops,
5180 [BTF_KIND_RESTRICT] = &modifier_ops,
5181 [BTF_KIND_FUNC] = &func_ops,
5182 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
5183 [BTF_KIND_VAR] = &var_ops,
5184 [BTF_KIND_DATASEC] = &datasec_ops,
5185 [BTF_KIND_FLOAT] = &float_ops,
5186 [BTF_KIND_DECL_TAG] = &decl_tag_ops,
5187 [BTF_KIND_TYPE_TAG] = &modifier_ops,
5188 [BTF_KIND_ENUM64] = &enum64_ops,
5191 static s32 btf_check_meta(struct btf_verifier_env *env,
5192 const struct btf_type *t,
5195 u32 saved_meta_left = meta_left;
5198 if (meta_left < sizeof(*t)) {
5199 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5200 env->log_type_id, meta_left, sizeof(*t));
5203 meta_left -= sizeof(*t);
5205 if (t->info & ~BTF_INFO_MASK) {
5206 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5207 env->log_type_id, t->info);
5211 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5212 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5213 btf_verifier_log(env, "[%u] Invalid kind:%u",
5214 env->log_type_id, BTF_INFO_KIND(t->info));
5218 if (!btf_name_offset_valid(env->btf, t->name_off)) {
5219 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5220 env->log_type_id, t->name_off);
5224 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5225 if (var_meta_size < 0)
5226 return var_meta_size;
5228 meta_left -= var_meta_size;
5230 return saved_meta_left - meta_left;
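/* Accounting sketch (illustrative): for a BTF_KIND_ARRAY entry the
 * type section layout is
 *
 *	struct btf_type		common header, sizeof(*t) bytes
 *	struct btf_array	kind-specific trailer
 *
 * check_meta() returns sizeof(struct btf_array) as var_meta_size, so
 * btf_check_meta() reports sizeof(*t) + sizeof(struct btf_array)
 * consumed bytes to btf_check_all_metas(), which advances its cursor
 * by exactly that amount.
 */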
5233 static int btf_check_all_metas(struct btf_verifier_env *env)
5235 struct btf *btf = env->btf;
5236 struct btf_header *hdr;
5240 cur = btf->nohdr_data + hdr->type_off;
5241 end = cur + hdr->type_len;
5243 env->log_type_id = btf->base_btf ? btf->start_id : 1;
5245 struct btf_type *t = cur;
5248 meta_size = btf_check_meta(env, t, end - cur);
5252 btf_add_type(env, t);
5260 static bool btf_resolve_valid(struct btf_verifier_env *env,
5261 const struct btf_type *t,
5264 struct btf *btf = env->btf;
5266 if (!env_type_is_resolved(env, type_id))
5269 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
5270 return !btf_resolved_type_id(btf, type_id) &&
5271 !btf_resolved_type_size(btf, type_id);
5273 if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
5274 return btf_resolved_type_id(btf, type_id) &&
5275 !btf_resolved_type_size(btf, type_id);
5277 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
5278 btf_type_is_var(t)) {
5279 t = btf_type_id_resolve(btf, &type_id);
5281 !btf_type_is_modifier(t) &&
5282 !btf_type_is_var(t) &&
5283 !btf_type_is_datasec(t);
5286 if (btf_type_is_array(t)) {
5287 const struct btf_array *array = btf_type_array(t);
5288 const struct btf_type *elem_type;
5289 u32 elem_type_id = array->type;
5292 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
5293 return elem_type && !btf_type_is_modifier(elem_type) &&
5294 (array->nelems * elem_size ==
5295 btf_resolved_type_size(btf, type_id));
5301 static int btf_resolve(struct btf_verifier_env *env,
5302 const struct btf_type *t, u32 type_id)
5304 u32 save_log_type_id = env->log_type_id;
5305 const struct resolve_vertex *v;
5308 env->resolve_mode = RESOLVE_TBD;
5309 env_stack_push(env, t, type_id);
5310 while (!err && (v = env_stack_peak(env))) {
5311 env->log_type_id = v->type_id;
5312 err = btf_type_ops(v->t)->resolve(env, v);
5315 env->log_type_id = type_id;
5316 if (err == -E2BIG) {
5317 btf_verifier_log_type(env, t,
5318 "Exceeded max resolving depth:%u",
5320 } else if (err == -EEXIST) {
5321 btf_verifier_log_type(env, t, "Loop detected");
5324 /* Final sanity check */
5325 if (!err && !btf_resolve_valid(env, t, type_id)) {
5326 btf_verifier_log_type(env, t, "Invalid resolve state");
5330 env->log_type_id = save_log_type_id;
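/* Illustrative failure modes (hypothetical ids): a reference cycle
 * such as
 *
 *	[1] TYPEDEF 'a' type_id=2
 *	[2] TYPEDEF 'b' type_id=1
 *
 * makes env_stack_push() find [1] already visited and fail with
 * -EEXIST, reported above as "Loop detected"; a chain that is acyclic
 * but deeper than MAX_RESOLVE_DEPTH fails with -E2BIG and is reported
 * as exceeding the max resolving depth.
 */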
5334 static int btf_check_all_types(struct btf_verifier_env *env)
5336 struct btf *btf = env->btf;
5337 const struct btf_type *t;
5341 err = env_resolve_init(env);
5346 for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5347 type_id = btf->start_id + i;
5348 t = btf_type_by_id(btf, type_id);
5350 env->log_type_id = type_id;
5351 if (btf_type_needs_resolve(t) &&
5352 !env_type_is_resolved(env, type_id)) {
5353 err = btf_resolve(env, t, type_id);
5358 if (btf_type_is_func_proto(t)) {
5359 err = btf_func_proto_check(env, t);
5368 static int btf_parse_type_sec(struct btf_verifier_env *env)
5370 const struct btf_header *hdr = &env->btf->hdr;
5373 /* Type section must align to 4 bytes */
5374 if (hdr->type_off & (sizeof(u32) - 1)) {
5375 btf_verifier_log(env, "Unaligned type_off");
5379 if (!env->btf->base_btf && !hdr->type_len) {
5380 btf_verifier_log(env, "No type found");
5384 err = btf_check_all_metas(env);
5388 return btf_check_all_types(env);
5391 static int btf_parse_str_sec(struct btf_verifier_env *env)
5393 const struct btf_header *hdr;
5394 struct btf *btf = env->btf;
5395 const char *start, *end;
5398 start = btf->nohdr_data + hdr->str_off;
5399 end = start + hdr->str_len;
5401 if (end != btf->data + btf->data_size) {
5402 btf_verifier_log(env, "String section is not at the end");
5406 btf->strings = start;
5408 if (btf->base_btf && !hdr->str_len)
5410 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5411 btf_verifier_log(env, "Invalid string section");
5414 if (!btf->base_btf && start[0]) {
5415 btf_verifier_log(env, "Invalid string section");
5422 static const size_t btf_sec_info_offset[] = {
5423 offsetof(struct btf_header, type_off),
5424 offsetof(struct btf_header, str_off),
5427 static int btf_sec_info_cmp(const void *a, const void *b)
5429 const struct btf_sec_info *x = a;
5430 const struct btf_sec_info *y = b;
5432 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5435 static int btf_check_sec_info(struct btf_verifier_env *env,
5438 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5439 u32 total, expected_total, i;
5440 const struct btf_header *hdr;
5441 const struct btf *btf;
5446 /* Populate the secs from hdr */
5447 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5448 secs[i] = *(struct btf_sec_info *)((void *)hdr +
5449 btf_sec_info_offset[i]);
5451 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5452 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5454 /* Check for gaps and overlap among sections */
5456 expected_total = btf_data_size - hdr->hdr_len;
5457 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5458 if (expected_total < secs[i].off) {
5459 btf_verifier_log(env, "Invalid section offset");
5462 if (total < secs[i].off) {
5464 btf_verifier_log(env, "Unsupported section found");
5467 if (total > secs[i].off) {
5468 btf_verifier_log(env, "Section overlap found");
5471 if (expected_total - total < secs[i].len) {
5472 btf_verifier_log(env,
5473 "Total section length too long");
5476 total += secs[i].len;
5479 /* There is data other than hdr and known sections */
5480 if (expected_total != total) {
5481 btf_verifier_log(env, "Unsupported section found");
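/* Illustrative layout the checks above accept (offsets relative to the
 * end of the header, numbers hypothetical):
 *
 *	hdr.type_off = 0,  hdr.type_len = 40
 *	hdr.str_off = 40,  hdr.str_len = btf_data_size - hdr_len - 40
 *
 * After sorting by offset, 'total' walks the sections back to back; a
 * gap (total < off), an overlap (total > off) or trailing bytes
 * (expected_total != total) each fail with the messages above.
 */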
5488 static int btf_parse_hdr(struct btf_verifier_env *env)
5490 u32 hdr_len, hdr_copy, btf_data_size;
5491 const struct btf_header *hdr;
5495 btf_data_size = btf->data_size;
5497 if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5498 btf_verifier_log(env, "hdr_len not found");
5503 hdr_len = hdr->hdr_len;
5504 if (btf_data_size < hdr_len) {
5505 btf_verifier_log(env, "btf_header not found");
5509 /* Ensure the unsupported header fields are zero */
5510 if (hdr_len > sizeof(btf->hdr)) {
5511 u8 *expected_zero = btf->data + sizeof(btf->hdr);
5512 u8 *end = btf->data + hdr_len;
5514 for (; expected_zero < end; expected_zero++) {
5515 if (*expected_zero) {
5516 btf_verifier_log(env, "Unsupported btf_header");
5522 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5523 memcpy(&btf->hdr, btf->data, hdr_copy);
5527 btf_verifier_log_hdr(env, btf_data_size);
5529 if (hdr->magic != BTF_MAGIC) {
5530 btf_verifier_log(env, "Invalid magic");
5534 if (hdr->version != BTF_VERSION) {
5535 btf_verifier_log(env, "Unsupported version");
5540 btf_verifier_log(env, "Unsupported flags");
5544 if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5545 btf_verifier_log(env, "No data");
5549 return btf_check_sec_info(env, btf_data_size);
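/* Compatibility sketch: hdr_len is what lets this parser accept BTF
 * generated with a larger, future header. For a hypothetical
 *
 *	struct btf_header_v2 {
 *		struct btf_header hdr;
 *		__u32 new_feature_off;	// unknown to this kernel
 *	};
 *
 * only min(hdr_len, sizeof(btf->hdr)) bytes are copied, and the loop
 * above insists every byte past sizeof(btf->hdr) is zero, so data that
 * actually uses a new field is rejected instead of being misparsed.
 */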
5552 static const char *alloc_obj_fields[] = {
5561 static struct btf_struct_metas *
5562 btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5564 struct btf_struct_metas *tab = NULL;
5565 struct btf_id_set *aof;
5568 BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5569 BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5571 aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
5573 return ERR_PTR(-ENOMEM);
5576 for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5577 /* Try to find whether this special type exists in user BTF, and
5578 * if so remember its ID so we can easily find it among members
5579 * of structs that we iterate in the next loop.
5581 struct btf_id_set *new_aof;
5583 id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5587 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5588 GFP_KERNEL | __GFP_NOWARN);
5594 aof->ids[aof->cnt++] = id;
5597 n = btf_nr_types(btf);
5598 for (i = 1; i < n; i++) {
5599 /* Try to find if there are kptrs in user BTF and remember their ID */
5600 struct btf_id_set *new_aof;
5601 struct btf_field_info tmp;
5602 const struct btf_type *t;
5604 t = btf_type_by_id(btf, i);
5610 ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5611 if (ret != BTF_FIELD_FOUND)
5614 new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5615 GFP_KERNEL | __GFP_NOWARN);
5621 aof->ids[aof->cnt++] = i;
5628 sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5630 for (i = 1; i < n; i++) {
5631 struct btf_struct_metas *new_tab;
5632 const struct btf_member *member;
5633 struct btf_struct_meta *type;
5634 struct btf_record *record;
5635 const struct btf_type *t;
5638 t = btf_type_by_id(btf, i);
5639 if (!__btf_type_is_struct(t))
5644 for_each_member(j, t, member) {
5645 if (btf_id_set_contains(aof, member->type))
5650 tab_cnt = tab ? tab->cnt : 0;
5651 new_tab = krealloc(tab, struct_size(new_tab, types, tab_cnt + 1),
5652 GFP_KERNEL | __GFP_NOWARN);
5661 type = &tab->types[tab->cnt];
5663 record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
5664 BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
5666 /* The record cannot be unset, treat it as an error if so */
5667 if (IS_ERR_OR_NULL(record)) {
5668 ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5671 type->record = record;
5677 btf_struct_metas_free(tab);
5680 return ERR_PTR(ret);
5683 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5685 struct btf_struct_metas *tab;
5687 BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5688 tab = btf->struct_meta_tab;
5691 return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5694 static int btf_check_type_tags(struct btf_verifier_env *env,
5695 struct btf *btf, int start_id)
5697 int i, n, good_id = start_id - 1;
5700 n = btf_nr_types(btf);
5701 for (i = start_id; i < n; i++) {
5702 const struct btf_type *t;
5703 int chain_limit = 32;
5706 t = btf_type_by_id(btf, i);
5709 if (!btf_type_is_modifier(t))
5714 in_tags = btf_type_is_type_tag(t);
5715 while (btf_type_is_modifier(t)) {
5716 if (!chain_limit--) {
5717 btf_verifier_log(env, "Max chain length or cycle detected");
5720 if (btf_type_is_type_tag(t)) {
5722 btf_verifier_log(env, "Type tags don't precede modifiers");
5725 } else if (in_tags) {
5728 if (cur_id <= good_id)
5730 /* Move to next type */
5732 t = btf_type_by_id(btf, cur_id);
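/* Ordering rule illustrated (hypothetical chains): the walk above
 * accepts
 *
 *	TYPE_TAG -> TYPE_TAG -> CONST -> VOLATILE -> INT
 *
 * but rejects
 *
 *	CONST -> TYPE_TAG -> INT
 *
 * because once a non-tag modifier is seen (in_tags becomes false), any
 * further BTF_KIND_TYPE_TAG trips the "Type tags don't precede
 * modifiers" error.
 */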
5741 static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
5746 err = bpf_vlog_finalize(log, &log_true_size);
5748 if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
5749 copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
5750 &log_true_size, sizeof(log_true_size)))
5756 static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
5758 bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5759 char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5760 struct btf_struct_metas *struct_meta_tab;
5761 struct btf_verifier_env *env = NULL;
5762 struct btf *btf = NULL;
5766 if (attr->btf_size > BTF_MAX_SIZE)
5767 return ERR_PTR(-E2BIG);
5769 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5771 return ERR_PTR(-ENOMEM);
5773 /* user could have requested verbose verifier output
5774 * and supplied buffer to store the verification trace
5776 err = bpf_vlog_init(&env->log, attr->btf_log_level,
5777 log_ubuf, attr->btf_log_size);
5781 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5788 data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5795 btf->data_size = attr->btf_size;
5797 if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5802 err = btf_parse_hdr(env);
5806 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5808 err = btf_parse_str_sec(env);
5812 err = btf_parse_type_sec(env);
5816 err = btf_check_type_tags(env, btf, 1);
5820 struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5821 if (IS_ERR(struct_meta_tab)) {
5822 err = PTR_ERR(struct_meta_tab);
5825 btf->struct_meta_tab = struct_meta_tab;
5827 if (struct_meta_tab) {
5830 for (i = 0; i < struct_meta_tab->cnt; i++) {
5831 err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5837 err = finalize_log(&env->log, uattr, uattr_size);
5841 btf_verifier_env_free(env);
5842 refcount_set(&btf->refcnt, 1);
5846 btf_free_struct_meta_tab(btf);
5848 /* overwrite err with -ENOSPC or -EFAULT */
5849 ret = finalize_log(&env->log, uattr, uattr_size);
5853 btf_verifier_env_free(env);
5856 return ERR_PTR(err);
5859 extern char __start_BTF[];
5860 extern char __stop_BTF[];
5861 extern struct btf *btf_vmlinux;
5863 #define BPF_MAP_TYPE(_id, _ops)
5864 #define BPF_LINK_TYPE(_id, _name)
5866 struct bpf_ctx_convert {
5867 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5868 prog_ctx_type _id##_prog; \
5869 kern_ctx_type _id##_kern;
5870 #include <linux/bpf_types.h>
5871 #undef BPF_PROG_TYPE
5873 /* 't' is written once under lock. Read many times. */
5874 const struct btf_type *t;
5877 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5879 #include <linux/bpf_types.h>
5880 #undef BPF_PROG_TYPE
5881 __ctx_convert_unused, /* to avoid empty enum in extreme .config */
5883 static u8 bpf_ctx_convert_map[] = {
5884 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5885 [_id] = __ctx_convert##_id,
5886 #include <linux/bpf_types.h>
5887 #undef BPF_PROG_TYPE
5888 0, /* avoid empty array */
5891 #undef BPF_LINK_TYPE
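/* Expansion sketch (one representative entry from bpf_types.h):
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
 *		      struct __sk_buff, struct sk_buff)
 *
 * contributes a pair of members to struct bpf_ctx_convert:
 *
 *	struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
 *	struct sk_buff	 BPF_PROG_TYPE_SOCKET_FILTER_kern;
 *
 * and bpf_ctx_convert_map[] records the pair's index, which is why the
 * lookups below multiply it by 2 (+1 selects the kernel-side member).
 */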
5893 static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
5895 const struct btf_type *conv_struct;
5896 const struct btf_member *ctx_type;
5898 conv_struct = bpf_ctx_convert.t;
5901 /* prog_type is valid bpf program type. No need for bounds check. */
5902 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5903 /* ctx_type is a pointer to prog_ctx_type in vmlinux.
5904 * Like 'struct __sk_buff'
5906 return btf_type_by_id(btf_vmlinux, ctx_type->type);
5909 static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
5911 const struct btf_type *conv_struct;
5912 const struct btf_member *ctx_type;
5914 conv_struct = bpf_ctx_convert.t;
5917 /* prog_type is valid bpf program type. No need for bounds check. */
5918 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
/* ctx_type is a pointer to kern_ctx_type in vmlinux.
5920 * Like 'struct sk_buff'
5922 return ctx_type->type;
5925 bool btf_is_projection_of(const char *pname, const char *tname)
5927 if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
5929 if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
5934 bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
5935 const struct btf_type *t, enum bpf_prog_type prog_type,
5938 const struct btf_type *ctx_type;
5939 const char *tname, *ctx_tname;
5941 t = btf_type_by_id(btf, t->type);
5943 /* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
5944 * check before we skip all the typedef below.
5946 if (prog_type == BPF_PROG_TYPE_KPROBE) {
5947 while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
5948 t = btf_type_by_id(btf, t->type);
5950 if (btf_type_is_typedef(t)) {
5951 tname = btf_name_by_offset(btf, t->name_off);
5952 if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5957 while (btf_type_is_modifier(t))
5958 t = btf_type_by_id(btf, t->type);
5959 if (!btf_type_is_struct(t)) {
5960 /* Only pointer to struct is supported for now.
5961 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5962 * is not supported yet.
5963 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5967 tname = btf_name_by_offset(btf, t->name_off);
5969 bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5973 ctx_type = find_canonical_prog_ctx_type(prog_type);
5975 bpf_log(log, "btf_vmlinux is malformed\n");
5976 /* should not happen */
5980 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
5982 /* should not happen */
5983 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5986 /* program types without named context types work only with arg:ctx tag */
5987 if (ctx_tname[0] == '\0')
5989 /* only compare that prog's ctx type name is the same as
5990 * kernel expects. No need to compare field by field.
5991 * It's ok for bpf prog to do:
5992 * struct __sk_buff {};
5993 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5994 * { // no fields of skb are ever used }
5996 if (btf_is_projection_of(ctx_tname, tname))
5998 if (strcmp(ctx_tname, tname)) {
5999 /* bpf_user_pt_regs_t is a typedef, so resolve it to
6000 * underlying struct and check name again
6002 if (!btf_type_is_modifier(ctx_type))
6004 while (btf_type_is_modifier(ctx_type))
6005 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6011 /* forward declarations for arch-specific underlying types of
6012 * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6013 * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
6014 * works correctly with __builtin_types_compatible_p() on respective
6017 struct user_regs_struct;
6018 struct user_pt_regs;
6020 static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
6021 const struct btf_type *t, int arg,
6022 enum bpf_prog_type prog_type,
6023 enum bpf_attach_type attach_type)
6025 const struct btf_type *ctx_type;
6026 const char *tname, *ctx_tname;
6028 if (!btf_is_ptr(t)) {
6029 bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6032 t = btf_type_by_id(btf, t->type);
6034 /* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
6035 if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
6036 while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6037 t = btf_type_by_id(btf, t->type);
6039 if (btf_type_is_typedef(t)) {
6040 tname = btf_name_by_offset(btf, t->name_off);
6041 if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6046 /* all other program types don't use typedefs for context type */
6047 while (btf_type_is_modifier(t))
6048 t = btf_type_by_id(btf, t->type);
6050 /* `void *ctx __arg_ctx` is always valid */
6051 if (btf_type_is_void(t))
6054 tname = btf_name_by_offset(btf, t->name_off);
6055 if (str_is_empty(tname)) {
6056 bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6061 switch (prog_type) {
6062 case BPF_PROG_TYPE_KPROBE:
6063 if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6066 case BPF_PROG_TYPE_PERF_EVENT:
6067 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6068 __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6070 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6071 __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6073 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6074 __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6077 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6078 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6079 /* allow u64* as ctx */
6080 if (btf_is_int(t) && t->size == 8)
6083 case BPF_PROG_TYPE_TRACING:
6084 switch (attach_type) {
6085 case BPF_TRACE_RAW_TP:
6086 /* tp_btf program is TRACING, so need special case here */
6087 if (__btf_type_is_struct(t) &&
6088 strcmp(tname, "bpf_raw_tracepoint_args") == 0)
6090 /* allow u64* as ctx */
6091 if (btf_is_int(t) && t->size == 8)
6094 case BPF_TRACE_ITER:
6095 /* allow struct bpf_iter__xxx types only */
6096 if (__btf_type_is_struct(t) &&
6097 strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6100 case BPF_TRACE_FENTRY:
6101 case BPF_TRACE_FEXIT:
6102 case BPF_MODIFY_RETURN:
6103 /* allow u64* as ctx */
6104 if (btf_is_int(t) && t->size == 8)
6111 case BPF_PROG_TYPE_LSM:
6112 case BPF_PROG_TYPE_STRUCT_OPS:
6113 /* allow u64* as ctx */
6114 if (btf_is_int(t) && t->size == 8)
6117 case BPF_PROG_TYPE_TRACEPOINT:
6118 case BPF_PROG_TYPE_SYSCALL:
6119 case BPF_PROG_TYPE_EXT:
6120 return 0; /* anything goes */
6125 ctx_type = find_canonical_prog_ctx_type(prog_type);
6127 /* should not happen */
6128 bpf_log(log, "btf_vmlinux is malformed\n");
6132 /* resolve typedefs and check that underlying structs are matching as well */
6133 while (btf_type_is_modifier(ctx_type))
6134 ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6136 /* if program type doesn't have distinctly named struct type for
6137 * context, then __arg_ctx argument can only be `void *`, which we
6138 * already checked above
6140 if (!__btf_type_is_struct(ctx_type)) {
6141 bpf_log(log, "arg#%d should be void pointer\n", arg);
6145 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6146 if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
6147 bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6154 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
6156 const struct btf_type *t,
6157 enum bpf_prog_type prog_type,
6160 if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
6162 return find_kern_ctx_type_id(prog_type);
6165 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6167 const struct btf_member *kctx_member;
6168 const struct btf_type *conv_struct;
6169 const struct btf_type *kctx_type;
6172 conv_struct = bpf_ctx_convert.t;
6173 /* get member for kernel ctx type */
6174 kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6175 kctx_type_id = kctx_member->type;
6176 kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
6177 if (!btf_type_is_struct(kctx_type)) {
6178 bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6182 return kctx_type_id;
6185 BTF_ID_LIST(bpf_ctx_convert_btf_id)
6186 BTF_ID(struct, bpf_ctx_convert)
6188 static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6189 void *data, unsigned int data_size)
6191 struct btf *btf = NULL;
6194 if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
6195 return ERR_PTR(-ENOENT);
6197 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6205 btf->data_size = data_size;
6206 btf->kernel_btf = true;
6207 snprintf(btf->name, sizeof(btf->name), "%s", name);
6209 err = btf_parse_hdr(env);
6213 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6215 err = btf_parse_str_sec(env);
6219 err = btf_check_all_metas(env);
6223 err = btf_check_type_tags(env, btf, 1);
6227 refcount_set(&btf->refcnt, 1);
6236 return ERR_PTR(err);
6239 struct btf *btf_parse_vmlinux(void)
6241 struct btf_verifier_env *env = NULL;
6242 struct bpf_verifier_log *log;
6246 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6248 return ERR_PTR(-ENOMEM);
6251 log->level = BPF_LOG_KERNEL;
6252 btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6256 /* btf_parse_vmlinux() runs under bpf_verifier_lock */
6257 bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
6258 err = btf_alloc_id(btf);
6264 btf_verifier_env_free(env);
6268 /* If .BTF_ids section was created with distilled base BTF, both base and
6269 * split BTF ids will need to be mapped to actual base/split ids for
6270 * BTF now that it has been relocated.
6272 static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
6274 if (!btf->base_btf || !btf->base_id_map)
6276 return btf->base_id_map[id];
6279 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6281 static struct btf *btf_parse_module(const char *module_name, const void *data,
6282 unsigned int data_size, void *base_data,
6283 unsigned int base_data_size)
6285 struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
6286 struct btf_verifier_env *env = NULL;
6287 struct bpf_verifier_log *log;
6290 vmlinux_btf = bpf_get_btf_vmlinux();
6291 if (IS_ERR(vmlinux_btf))
6294 return ERR_PTR(-EINVAL);
6296 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6298 return ERR_PTR(-ENOMEM);
6301 log->level = BPF_LOG_KERNEL;
6304 base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
6305 if (IS_ERR(base_btf)) {
6306 err = PTR_ERR(base_btf);
6310 base_btf = vmlinux_btf;
6313 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6320 btf->base_btf = base_btf;
6321 btf->start_id = base_btf->nr_types;
6322 btf->start_str_off = base_btf->hdr.str_len;
6323 btf->kernel_btf = true;
6324 snprintf(btf->name, sizeof(btf->name), "%s", module_name);
6326 btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6331 btf->data_size = data_size;
6333 err = btf_parse_hdr(env);
6337 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6339 err = btf_parse_str_sec(env);
6343 err = btf_check_all_metas(env);
6347 err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
6351 if (base_btf != vmlinux_btf) {
6352 err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6356 base_btf = vmlinux_btf;
6359 btf_verifier_env_free(env);
6360 refcount_set(&btf->refcnt, 1);
6364 btf_verifier_env_free(env);
6365 if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
6372 return ERR_PTR(err);
6375 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6377 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
6379 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6382 return tgt_prog->aux->btf;
6384 return prog->aux->attach_btf;
6387 static bool is_void_or_int_ptr(struct btf *btf, const struct btf_type *t)
6389 /* skip modifiers */
6390 t = btf_type_skip_modifiers(btf, t->type, NULL);
6391 return btf_type_is_void(t) || btf_type_is_int(t);
6394 u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
6397 const struct btf_param *args;
6398 const struct btf_type *t;
6399 u32 offset = 0, nr_args;
6405 nr_args = btf_type_vlen(func_proto);
6406 args = (const struct btf_param *)(func_proto + 1);
6407 for (i = 0; i < nr_args; i++) {
6408 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6409 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6414 t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6415 offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
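/* Worked example (hypothetical prototype): for
 *
 *	int f(int a, struct sk_buff *skb, u64 c);
 *
 * the ctx is laid out in 8-byte slots, so byte offsets map as
 *
 *	off 0  -> arg 0 ('a', int rounded up to 8 bytes)
 *	off 8  -> arg 1 ('skb', a pointer)
 *	off 16 -> arg 2 ('c')
 *
 * and the slot just past the arguments, sized above from
 * func_proto->type, corresponds to the return value.
 */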
6422 static bool prog_args_trusted(const struct bpf_prog *prog)
6424 enum bpf_attach_type atype = prog->expected_attach_type;
6426 switch (prog->type) {
6427 case BPF_PROG_TYPE_TRACING:
6428 return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
6429 case BPF_PROG_TYPE_LSM:
6430 return bpf_lsm_is_trusted(prog);
6431 case BPF_PROG_TYPE_STRUCT_OPS:
6438 int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
6441 const struct btf_param *args;
6442 const struct btf_type *t;
6446 args = btf_params(func_proto);
6447 for (i = 0; i < arg_no; i++) {
6448 t = btf_type_by_id(btf, args[i].type);
6449 t = btf_resolve_size(btf, t, &sz);
6452 off += roundup(sz, 8);
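/* The table below encodes, per tracepoint, 4 mask bits per argument:
 * bit (0x1ULL << (arg * 4)) marks an argument that may legitimately be
 * NULL, and bit (0x2ULL << (arg * 4)) marks one that may carry an
 * IS_ERR() encoded error. Reading an entry (illustrative): 0x10 means
 * "argument 1 may be NULL", 0x11 means "arguments 0 and 1 may both be
 * NULL". btf_ctx_access() below tests exactly these bits.
 */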
6458 struct bpf_raw_tp_null_args {
6463 static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6465 { "sched_pi_setprio", 0x10 },
6466 /* ... from sched_numa_pair_template event class */
6467 { "sched_stick_numa", 0x100 },
6468 { "sched_swap_numa", 0x100 },
6470 { "afs_make_fs_call", 0x10 },
6471 { "afs_make_fs_calli", 0x10 },
6472 { "afs_make_fs_call1", 0x10 },
6473 { "afs_make_fs_call2", 0x10 },
6474 { "afs_protocol_error", 0x1 },
6475 { "afs_flock_ev", 0x10 },
6477 { "cachefiles_lookup", 0x1 | 0x200 },
6478 { "cachefiles_unlink", 0x1 },
6479 { "cachefiles_rename", 0x1 },
6480 { "cachefiles_prep_read", 0x1 },
6481 { "cachefiles_mark_active", 0x1 },
6482 { "cachefiles_mark_failed", 0x1 },
6483 { "cachefiles_mark_inactive", 0x1 },
6484 { "cachefiles_vfs_error", 0x1 },
6485 { "cachefiles_io_error", 0x1 },
6486 { "cachefiles_ondemand_open", 0x1 },
6487 { "cachefiles_ondemand_copen", 0x1 },
6488 { "cachefiles_ondemand_close", 0x1 },
6489 { "cachefiles_ondemand_read", 0x1 },
6490 { "cachefiles_ondemand_cread", 0x1 },
6491 { "cachefiles_ondemand_fd_write", 0x1 },
6492 { "cachefiles_ondemand_fd_release", 0x1 },
6493 /* ext4, from ext4__mballoc event class */
6494 { "ext4_mballoc_discard", 0x10 },
6495 { "ext4_mballoc_free", 0x10 },
6497 { "fib_table_lookup", 0x100 },
6499 /* ... from filelock_lock event class */
6500 { "posix_lock_inode", 0x10 },
6501 { "fcntl_setlk", 0x10 },
6502 { "locks_remove_posix", 0x10 },
6503 { "flock_lock_inode", 0x10 },
6504 /* ... from filelock_lease event class */
6505 { "break_lease_noblock", 0x10 },
6506 { "break_lease_block", 0x10 },
6507 { "break_lease_unblock", 0x10 },
6508 { "generic_delete_lease", 0x10 },
6509 { "time_out_leases", 0x10 },
6511 { "host1x_cdma_push_gather", 0x10000 },
6513 { "mm_khugepaged_scan_pmd", 0x10 },
6514 { "mm_collapse_huge_page_isolate", 0x1 },
6515 { "mm_khugepaged_scan_file", 0x10 },
6516 { "mm_khugepaged_collapse_file", 0x10 },
6518 { "mm_page_alloc", 0x1 },
6519 { "mm_page_pcpu_drain", 0x1 },
6520 /* .. from mm_page event class */
6521 { "mm_page_alloc_zone_locked", 0x1 },
6523 { "netfs_failure", 0x10 },
6525 { "device_pm_callback_start", 0x10 },
6527 { "qdisc_dequeue", 0x1000 },
6529 { "rxrpc_recvdata", 0x1 },
6530 { "rxrpc_resend", 0x10 },
6531 { "rxrpc_tq", 0x10 },
6532 { "rxrpc_client", 0x1 },
6534 {"kfree_skb", 0x1000},
6536 { "xs_stream_read_data", 0x1 },
6537 /* ... from xprt_cong_event event class */
6538 { "xprt_reserve_cong", 0x10 },
6539 { "xprt_release_cong", 0x10 },
6540 { "xprt_get_cong", 0x10 },
6541 { "xprt_put_cong", 0x10 },
6543 { "tcp_send_reset", 0x11 },
6544 { "tcp_sendmsg_locked", 0x100 },
6546 { "tegra_dma_tx_status", 0x100 },
6547 /* timer_migration */
6548 { "tmigr_update_events", 0x1 },
6549 /* writeback, from writeback_folio_template event class */
6550 { "writeback_dirty_folio", 0x10 },
6551 { "folio_wait_writeback", 0x10 },
6553 { "mr_integ_alloc", 0x2000 },
6555 { "bpf_testmod_test_read", 0x0 },
6557 { "amdgpu_vm_bo_map", 0x1 },
6558 { "amdgpu_vm_bo_unmap", 0x1 },
6560 { "netfs_folioq", 0x1 },
6561 /* xfs from xfs_defer_pending_class */
6562 { "xfs_defer_create_intent", 0x1 },
6563 { "xfs_defer_cancel_list", 0x1 },
6564 { "xfs_defer_pending_finish", 0x1 },
6565 { "xfs_defer_pending_abort", 0x1 },
6566 { "xfs_defer_relog_intent", 0x1 },
6567 { "xfs_defer_isolate_paused", 0x1 },
6568 { "xfs_defer_item_pause", 0x1 },
6569 { "xfs_defer_item_unpause", 0x1 },
6570 /* xfs from xfs_defer_pending_item_class */
6571 { "xfs_defer_add_item", 0x1 },
6572 { "xfs_defer_cancel_item", 0x1 },
6573 { "xfs_defer_finish_item", 0x1 },
6574 /* xfs from xfs_icwalk_class */
6575 { "xfs_ioc_free_eofblocks", 0x10 },
6576 { "xfs_blockgc_free_space", 0x10 },
6577 /* xfs from xfs_btree_cur_class */
6578 { "xfs_btree_updkeys", 0x100 },
6579 { "xfs_btree_overlapped_query_range", 0x100 },
/* xfs from xfs_imap_class */
6581 { "xfs_map_blocks_found", 0x10000 },
6582 { "xfs_map_blocks_alloc", 0x10000 },
6583 { "xfs_iomap_alloc", 0x1000 },
6584 { "xfs_iomap_found", 0x1000 },
6585 /* xfs from xfs_fs_class */
6586 { "xfs_inodegc_flush", 0x1 },
6587 { "xfs_inodegc_push", 0x1 },
6588 { "xfs_inodegc_start", 0x1 },
6589 { "xfs_inodegc_stop", 0x1 },
6590 { "xfs_inodegc_queue", 0x1 },
6591 { "xfs_inodegc_throttle", 0x1 },
6592 { "xfs_fs_sync_fs", 0x1 },
6593 { "xfs_blockgc_start", 0x1 },
6594 { "xfs_blockgc_stop", 0x1 },
6595 { "xfs_blockgc_worker", 0x1 },
6596 { "xfs_blockgc_flush_all", 0x1 },
6598 { "xchk_nlinks_live_update", 0x10 },
6599 /* xfs_scrub from xchk_metapath_class */
6600 { "xchk_metapath_lookup", 0x100 },
6602 { "nfsd_dirent", 0x1 },
6603 { "nfsd_file_acquire", 0x1001 },
6604 { "nfsd_file_insert_err", 0x1 },
6605 { "nfsd_file_cons_err", 0x1 },
6607 { "nfs4_setup_sequence", 0x1 },
6608 { "pnfs_update_layout", 0x10000 },
6609 { "nfs4_inode_callback_event", 0x200 },
6610 { "nfs4_inode_stateid_callback_event", 0x200 },
6611 /* nfs from pnfs_layout_event */
6612 { "pnfs_mds_fallback_pg_init_read", 0x10000 },
6613 { "pnfs_mds_fallback_pg_init_write", 0x10000 },
6614 { "pnfs_mds_fallback_pg_get_mirror_count", 0x10000 },
6615 { "pnfs_mds_fallback_read_done", 0x10000 },
6616 { "pnfs_mds_fallback_write_done", 0x10000 },
6617 { "pnfs_mds_fallback_read_pagelist", 0x10000 },
6618 { "pnfs_mds_fallback_write_pagelist", 0x10000 },
6620 { "coda_dec_pic_run", 0x10 },
6621 { "coda_dec_pic_done", 0x10 },
6623 { "cfg80211_scan_done", 0x11 },
6624 { "rdev_set_coalesce", 0x10 },
6625 { "cfg80211_report_wowlan_wakeup", 0x100 },
6626 { "cfg80211_inform_bss_frame", 0x100 },
6627 { "cfg80211_michael_mic_failure", 0x10000 },
6628 /* cfg80211 from wiphy_work_event */
6629 { "wiphy_work_queue", 0x10 },
6630 { "wiphy_work_run", 0x10 },
6631 { "wiphy_work_cancel", 0x10 },
6632 { "wiphy_work_flush", 0x10 },
6634 { "hugetlbfs_alloc_inode", 0x10 },
6636 { "spufs_context", 0x10 },
6638 { "kvm_page_fault_enter", 0x100 },
6640 { "dpu_crtc_setup_mixer", 0x100 },
6642 { "binder_transaction", 0x100 },
6644 { "btree_path_free", 0x100 },
6646 { "hfi1_sdma_progress", 0x1000 },
6648 { "iptfs_ingress_postq_event", 0x1000 },
6650 { "neigh_update", 0x10 },
6651 /* snd_firewire_lib */
6652 { "amdtp_packet", 0x100 },
6655 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6656 const struct bpf_prog *prog,
6657 struct bpf_insn_access_aux *info)
6659 const struct btf_type *t = prog->aux->attach_func_proto;
6660 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6661 struct btf *btf = bpf_prog_get_target_btf(prog);
6662 const char *tname = prog->aux->attach_func_name;
6663 struct bpf_verifier_log *log = info->log;
6664 const struct btf_param *args;
6665 bool ptr_err_raw_tp = false;
6666 const char *tag_value;
6671 bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6675 arg = btf_ctx_arg_idx(btf, t, off);
6676 args = (const struct btf_param *)(t + 1);
/* If t is NULL, fall back to a default BPF prog with
 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
6680 nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
6681 if (prog->aux->attach_btf_trace) {
6682 /* skip first 'void *__data' argument in btf_trace_##name typedef */
6687 if (arg > nr_args) {
6688 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6693 if (arg == nr_args) {
6694 switch (prog->expected_attach_type) {
6696 /* mark we are accessing the return value */
6697 info->is_retval = true;
6699 case BPF_LSM_CGROUP:
6700 case BPF_TRACE_FEXIT:
6701 /* When LSM programs are attached to void LSM hooks
6702 * they use FEXIT trampolines and when attached to
6703 * int LSM hooks, they use MODIFY_RETURN trampolines.
*
* While the LSM programs are BPF_MODIFY_RETURN-like,
* the check:
*
*	if (ret_type != 'int')
*		return -EINVAL;
*
* is _not_ done here. This is still safe as LSM hooks
* have only void and int return types.
6716 t = btf_type_by_id(btf, t->type);
6718 case BPF_MODIFY_RETURN:
6719 /* For now the BPF_MODIFY_RETURN can only be attached to
6720 * functions that return an int.
6725 t = btf_type_skip_modifiers(btf, t->type, NULL);
6726 if (!btf_type_is_small_int(t)) {
6728 "ret type %s not allowed for fmod_ret\n",
6734 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6740 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6742 t = btf_type_by_id(btf, args[arg].type);
6745 /* skip modifiers */
6746 while (btf_type_is_modifier(t))
6747 t = btf_type_by_id(btf, t->type);
6748 if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
6749 /* accessing a scalar */
6751 if (!btf_type_is_ptr(t)) {
6753 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6755 __btf_name_by_offset(btf, t->name_off),
6760 if (size != sizeof(u64)) {
6761 bpf_log(log, "func '%s' size %d must be 8\n",
6766 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6767 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6768 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6771 type = base_type(ctx_arg_info->reg_type);
6772 flag = type_flag(ctx_arg_info->reg_type);
6773 if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6774 (flag & PTR_MAYBE_NULL)) {
6775 info->reg_type = ctx_arg_info->reg_type;
6781 * If it's a pointer to void, it's the same as scalar from the verifier
* safety POV. Either way, no further pointer walking is allowed.
6784 if (is_void_or_int_ptr(btf, t))
6787 /* this is a pointer to another type */
6788 for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6789 const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6791 if (ctx_arg_info->offset == off) {
6792 if (!ctx_arg_info->btf_id) {
6793 bpf_log(log,"invalid btf_id for context argument offset %u\n", off);
6797 info->reg_type = ctx_arg_info->reg_type;
6798 info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6799 info->btf_id = ctx_arg_info->btf_id;
6800 info->ref_obj_id = ctx_arg_info->ref_obj_id;
6805 info->reg_type = PTR_TO_BTF_ID;
6806 if (prog_args_trusted(prog))
6807 info->reg_type |= PTR_TRUSTED;
6809 if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6810 info->reg_type |= PTR_MAYBE_NULL;
6812 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6813 struct btf *btf = prog->aux->attach_btf;
6814 const struct btf_type *t;
6817 /* BTF lookups cannot fail, return false on error */
6818 t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6821 tname = btf_name_by_offset(btf, t->name_off);
6824 /* Checked by bpf_check_attach_target */
6825 tname += sizeof("btf_trace_") - 1;
6826 for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6827 /* Is this a func with potential NULL args? */
6828 if (strcmp(tname, raw_tp_null_args[i].func))
6830 if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4)))
6831 info->reg_type |= PTR_MAYBE_NULL;
6832 /* Is the current arg IS_ERR? */
6833 if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4)))
6834 ptr_err_raw_tp = true;
/* If we don't know the NULL-ness specification and the tracepoint
6838 * is coming from a loadable module, be conservative and mark
6839 * argument as PTR_MAYBE_NULL.
6841 if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6842 info->reg_type |= PTR_MAYBE_NULL;
6846 enum bpf_prog_type tgt_type;
6848 if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6849 tgt_type = tgt_prog->aux->saved_dst_prog_type;
6851 tgt_type = tgt_prog->type;
6853 ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6855 info->btf = btf_vmlinux;
6864 info->btf_id = t->type;
6865 t = btf_type_by_id(btf, t->type);
6867 if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
6868 tag_value = __btf_name_by_offset(btf, t->name_off);
6869 if (strcmp(tag_value, "user") == 0)
6870 info->reg_type |= MEM_USER;
6871 if (strcmp(tag_value, "percpu") == 0)
6872 info->reg_type |= MEM_PERCPU;
6875 /* skip modifiers */
6876 while (btf_type_is_modifier(t)) {
6877 info->btf_id = t->type;
6878 t = btf_type_by_id(btf, t->type);
6880 if (!btf_type_is_struct(t)) {
6882 "func '%s' arg%d type %s is not a struct\n",
6883 tname, arg, btf_type_str(t));
6886 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6887 tname, arg, info->btf_id, btf_type_str(t),
6888 __btf_name_by_offset(btf, t->name_off));
6890 /* Perform all checks on the validity of type for this argument, but if
6891 * we know it can be IS_ERR at runtime, scrub pointer type and mark as
6894 if (ptr_err_raw_tp) {
6895 bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
6896 info->reg_type = SCALAR_VALUE;
6900 EXPORT_SYMBOL_GPL(btf_ctx_access);
6902 enum bpf_struct_walk_result {
6909 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
6910 const struct btf_type *t, int off, int size,
6911 u32 *next_btf_id, enum bpf_type_flag *flag,
6912 const char **field_name)
6914 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
6915 const struct btf_type *mtype, *elem_type = NULL;
6916 const struct btf_member *member;
6917 const char *tname, *mname, *tag_value;
6918 u32 vlen, elem_id, mid;
6921 if (btf_type_is_modifier(t))
6922 t = btf_type_skip_modifiers(btf, t->type, NULL);
6923 tname = __btf_name_by_offset(btf, t->name_off);
6924 if (!btf_type_is_struct(t)) {
6925 bpf_log(log, "Type '%s' is not a struct\n", tname);
6929 vlen = btf_type_vlen(t);
6930 if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
6932 * walking unions yields untrusted pointers
6933 * with exception of __bpf_md_ptr and other
6934 * unions with a single member
6936 *flag |= PTR_UNTRUSTED;
6938 if (off + size > t->size) {
6939 /* If the last element is a variable size array, we may
6940 * need to relax the rule.
6942 struct btf_array *array_elem;
6947 member = btf_type_member(t) + vlen - 1;
6948 mtype = btf_type_skip_modifiers(btf, member->type,
6950 if (!btf_type_is_array(mtype))
6953 array_elem = (struct btf_array *)(mtype + 1);
6954 if (array_elem->nelems != 0)
6957 moff = __btf_member_bit_offset(t, member) / 8;
6961 /* allow structure and integer */
6962 t = btf_type_skip_modifiers(btf, array_elem->type,
6965 if (btf_type_is_int(t))
6968 if (!btf_type_is_struct(t))
6971 off = (off - moff) % t->size;
6975 bpf_log(log, "access beyond struct %s at off %u size %u\n",
6980 for_each_member(i, t, member) {
6981 /* offset of the field in bytes */
6982 moff = __btf_member_bit_offset(t, member) / 8;
6983 if (off + size <= moff)
6984 /* won't find anything, field is already too far */
6987 if (__btf_member_bitfield_size(t, member)) {
6988 u32 end_bit = __btf_member_bit_offset(t, member) +
6989 __btf_member_bitfield_size(t, member);
6991 /* off <= moff instead of off == moff because clang
6992 * does not generate a BTF member for anonymous
* bitfield like the ":16" here:
*
* struct {
*	int :16;
*	int x:8;
* };
*/
7000 BITS_ROUNDUP_BYTES(end_bit) <= off + size)
7003 /* off may be accessing a following member
7007 * Doing partial access at either end of this
7008 * bitfield. Continue on this case also to
7009 * treat it as not accessing this bitfield
7010 * and eventually error out as field not
7011 * found to keep it simple.
7012 * It could be relaxed if there was a legit
7013 * partial access case later.
7018 /* In case of "off" is pointing to holes of a struct */
7022 /* type of the field */
7024 mtype = btf_type_by_id(btf, member->type);
7025 mname = __btf_name_by_offset(btf, member->name_off);
7027 mtype = __btf_resolve_size(btf, mtype, &msize,
7028 &elem_type, &elem_id, &total_nelems,
7030 if (IS_ERR(mtype)) {
7031 bpf_log(log, "field %s doesn't have size\n", mname);
7035 mtrue_end = moff + msize;
7036 if (off >= mtrue_end)
7037 /* no overlap with member, keep iterating */
7040 if (btf_type_is_array(mtype)) {
7043 /* __btf_resolve_size() above helps to
7044 * linearize a multi-dimensional array.
7046 * The logic here is treating an array
7047 * in a struct as the following way:
*
* struct outer {
*	struct inner array[2][2];
* };
*
* looks like:
*
* struct outer {
*	struct inner array_elem0;
*	struct inner array_elem1;
*	struct inner array_elem2;
*	struct inner array_elem3;
* };
*
7062 * When accessing outer->array[1][0], it moves
7063 * moff to "array_elem2", set mtype to
7064 * "struct inner", and msize also becomes
7065 * sizeof(struct inner). Then most of the
7066 * remaining logic will fall through without
7067 * caring the current member is an array or
7070 * Unlike mtype/msize/moff, mtrue_end does not
7071 * change. The naming difference ("_true") tells
7072 * that it is not always corresponding to
7073 * the current mtype/msize/moff.
7074 * It is the true end of the current
7075 * member (i.e. array in this case). That
* will allow an int array to be accessed like
* a scratch space,
7078 * i.e. allow access beyond the size of
7079 * the array's element as long as it is
7080 * within the mtrue_end boundary.
7083 /* skip empty array */
7084 if (moff == mtrue_end)
7087 msize /= total_nelems;
7088 elem_idx = (off - moff) / msize;
7089 moff += elem_idx * msize;
7094 /* the 'off' we're looking for is either equal to start
7095 * of this field or inside of this struct
7097 if (btf_type_is_struct(mtype)) {
7098 /* our field must be inside that union or struct */
7101 /* return if the offset matches the member offset */
7107 /* adjust offset we're looking for */
7112 if (btf_type_is_ptr(mtype)) {
7113 const struct btf_type *stype, *t;
7114 enum bpf_type_flag tmp_flag = 0;
7117 if (msize != size || off != moff) {
7119 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
7120 mname, moff, tname, off, size);
7124 /* check type tag */
7125 t = btf_type_by_id(btf, mtype->type);
7126 if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
7127 tag_value = __btf_name_by_offset(btf, t->name_off);
7128 /* check __user tag */
7129 if (strcmp(tag_value, "user") == 0)
7130 tmp_flag = MEM_USER;
7131 /* check __percpu tag */
7132 if (strcmp(tag_value, "percpu") == 0)
7133 tmp_flag = MEM_PERCPU;
7134 /* check __rcu tag */
7135 if (strcmp(tag_value, "rcu") == 0)
7139 stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7140 if (btf_type_is_struct(stype)) {
7144 *field_name = mname;
7149 /* Allow more flexible access within an int as long as
7150 * it is within mtrue_end.
7151 * Since mtrue_end could be the end of an array,
7152 * that also allows using an array of int as scratch
7153 * space, e.g. skb->cb[].
7155 if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
7157 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
7158 mname, mtrue_end, tname, off, size);
7164 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
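/* Illustrative sketch (not from the original source): a worked example of
 * the array flattening described above, using hypothetical types:
 *
 *	struct inner { int a; int b; };			// sizeof == 8
 *	struct outer { struct inner array[2][2]; };	// sizeof == 32
 *
 * For an access at off == 16, i.e. outer->array[1][0]:
 *	__btf_resolve_size() gives msize == 32, total_nelems == 4 and
 *	elem_type == "struct inner", so mtrue_end == moff + 32 == 32;
 *	msize /= total_nelems	=> msize == 8 (sizeof(struct inner));
 *	elem_idx = (16 - 0) / 8	=> 2;
 *	moff += 2 * 8		=> moff == 16, i.e. "array_elem2".
 */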
7168 int btf_struct_access(struct bpf_verifier_log *log,
7169 const struct bpf_reg_state *reg,
7170 int off, int size, enum bpf_access_type atype __maybe_unused,
7171 u32 *next_btf_id, enum bpf_type_flag *flag,
7172 const char **field_name)
7174 const struct btf *btf = reg->btf;
7175 enum bpf_type_flag tmp_flag = 0;
7176 const struct btf_type *t;
7177 u32 id = reg->btf_id;
7180 while (type_is_alloc(reg->type)) {
7181 struct btf_struct_meta *meta;
7182 struct btf_record *rec;
7185 meta = btf_find_struct_meta(btf, id);
7189 for (i = 0; i < rec->cnt; i++) {
7190 struct btf_field *field = &rec->fields[i];
7191 u32 offset = field->offset;
7192 if (off < offset + field->size && offset < off + size) {
7194 "direct access to %s is disallowed\n",
7195 btf_field_type_name(field->type));
7202 t = btf_type_by_id(btf, id);
7204 err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
7208 /* For local types, the destination register cannot
7209 * become a pointer again.
7211 if (type_is_alloc(reg->type))
7212 return SCALAR_VALUE;
7213 /* If we found the pointer or scalar on t+off,
7218 return PTR_TO_BTF_ID;
7220 return SCALAR_VALUE;
7222 /* We found a nested struct, so continue the search
7223 * by diving into it. At this point the offset is
7224 * aligned with the new type, so set it to 0.
7226 t = btf_type_by_id(btf, id);
7230 /* It's either an error or an unknown return value.
7233 if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
7242 /* Check that two BTF types, each specified as a BTF object + id, are exactly
7243 * the same. A trivial ID check is not enough due to module BTFs, because we can
7244 * end up with two different module BTFs, but IDs pointing to the common type in
7247 bool btf_types_are_same(const struct btf *btf1, u32 id1,
7248 const struct btf *btf2, u32 id2)
7254 return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
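/* For illustration (a sketch, not from the original source): with split
 * BTF, a module BTF reuses the vmlinux base type ids, so looking up a
 * common vmlinux type through two different module BTFs yields the same
 * btf_type object and the pointer comparison above succeeds:
 *
 *	// hypothetical: skb_id is the vmlinux id of "struct sk_buff"
 *	btf_types_are_same(mod_a_btf, skb_id, mod_b_btf, skb_id); // true
 */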
7257 bool btf_struct_ids_match(struct bpf_verifier_log *log,
7258 const struct btf *btf, u32 id, int off,
7259 const struct btf *need_btf, u32 need_type_id,
7262 const struct btf_type *type;
7263 enum bpf_type_flag flag = 0;
7266 /* Are we already done? */
7267 if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
7269 /* In case of strict type match, we do not walk the struct; the top-level
7270 * type match must succeed. When strict is true, off should have already been 0.
7276 type = btf_type_by_id(btf, id);
7279 err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
7280 if (err != WALK_STRUCT)
7283 /* We found a nested struct object. If it matches
7284 * the requested ID, we're done. Otherwise let's
7285 * continue the search with offset 0 in the new
7288 if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
7296 static int __get_type_size(struct btf *btf, u32 btf_id,
7297 const struct btf_type **ret_type)
7299 const struct btf_type *t;
7301 *ret_type = btf_type_by_id(btf, 0);
7305 t = btf_type_by_id(btf, btf_id);
7306 while (t && btf_type_is_modifier(t))
7307 t = btf_type_by_id(btf, t->type);
7311 if (btf_type_is_ptr(t))
7312 /* kernel size of pointer, not BPF's size of pointer */
7313 return sizeof(void *);
7314 if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
7319 static u8 __get_type_fmodel_flags(const struct btf_type *t)
7323 if (__btf_type_is_struct(t))
7324 flags |= BTF_FMODEL_STRUCT_ARG;
7325 if (btf_type_is_signed_int(t))
7326 flags |= BTF_FMODEL_SIGNED_ARG;
7331 int btf_distill_func_proto(struct bpf_verifier_log *log,
7333 const struct btf_type *func,
7335 struct btf_func_model *m)
7337 const struct btf_param *args;
7338 const struct btf_type *t;
7343 /* BTF function prototype doesn't match the verifier types.
7344 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
7346 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7348 m->arg_flags[i] = 0;
7352 m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7355 args = (const struct btf_param *)(func + 1);
7356 nargs = btf_type_vlen(func);
7357 if (nargs > MAX_BPF_FUNC_ARGS) {
7359 "The function %s has %d arguments. Too many.\n",
7363 ret = __get_type_size(btf, func->type, &t);
7364 if (ret < 0 || __btf_type_is_struct(t)) {
7366 "The function %s return type %s is unsupported.\n",
7367 tname, btf_type_str(t));
7371 m->ret_flags = __get_type_fmodel_flags(t);
7373 for (i = 0; i < nargs; i++) {
7374 if (i == nargs - 1 && args[i].type == 0) {
7376 "The function %s with variable args is unsupported.\n",
7380 ret = __get_type_size(btf, args[i].type, &t);
7382 /* Struct arguments larger than 16 bytes are not supported */
7383 if (ret < 0 || ret > 16) {
7385 "The function %s arg%d type %s is unsupported.\n",
7386 tname, i, btf_type_str(t));
7391 "The function %s has malformed void argument.\n",
7395 m->arg_size[i] = ret;
7396 m->arg_flags[i] = __get_type_fmodel_flags(t);
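/* Illustrative sketch (not from the original source): for a hypothetical
 * kernel function
 *
 *	int do_stuff(struct file *f, long cnt);
 *
 * the distilled model would come out roughly as:
 *
 *	m->ret_size    == 4;			// sizeof(int)
 *	m->ret_flags   == BTF_FMODEL_SIGNED_ARG;
 *	m->nr_args     == 2;
 *	m->arg_size[0] == 8;			// kernel pointer width
 *	m->arg_size[1] == 8;			// sizeof(long)
 *	m->arg_flags[1] == BTF_FMODEL_SIGNED_ARG;
 */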
7402 /* Compare BTFs of two functions assuming only scalars and pointers to context.
7403 * t1 points to BTF_KIND_FUNC in btf1
7404 * t2 points to BTF_KIND_FUNC in btf2
7406 * EINVAL - function prototype mismatch
7407 * EFAULT - verifier bug
7408 * 0 - 99% match. The last 1% is validated by the verifier.
7410 static int btf_check_func_type_match(struct bpf_verifier_log *log,
7411 struct btf *btf1, const struct btf_type *t1,
7412 struct btf *btf2, const struct btf_type *t2)
7414 const struct btf_param *args1, *args2;
7415 const char *fn1, *fn2, *s1, *s2;
7416 u32 nargs1, nargs2, i;
7418 fn1 = btf_name_by_offset(btf1, t1->name_off);
7419 fn2 = btf_name_by_offset(btf2, t2->name_off);
7421 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
7422 bpf_log(log, "%s() is not a global function\n", fn1);
7425 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
7426 bpf_log(log, "%s() is not a global function\n", fn2);
7430 t1 = btf_type_by_id(btf1, t1->type);
7431 if (!t1 || !btf_type_is_func_proto(t1))
7433 t2 = btf_type_by_id(btf2, t2->type);
7434 if (!t2 || !btf_type_is_func_proto(t2))
7437 args1 = (const struct btf_param *)(t1 + 1);
7438 nargs1 = btf_type_vlen(t1);
7439 args2 = (const struct btf_param *)(t2 + 1);
7440 nargs2 = btf_type_vlen(t2);
7442 if (nargs1 != nargs2) {
7443 bpf_log(log, "%s() has %d args while %s() has %d args\n",
7444 fn1, nargs1, fn2, nargs2);
7448 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7449 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7450 if (t1->info != t2->info) {
7452 "Return type %s of %s() doesn't match type %s of %s()\n",
7453 btf_type_str(t1), fn1,
7454 btf_type_str(t2), fn2);
7458 for (i = 0; i < nargs1; i++) {
7459 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
7460 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
7462 if (t1->info != t2->info) {
7463 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
7464 i, fn1, btf_type_str(t1),
7465 fn2, btf_type_str(t2));
7468 if (btf_type_has_size(t1) && t1->size != t2->size) {
7470 "arg%d in %s() has size %d while %s() has %d\n",
7476 /* global functions are validated with scalars and pointers
7477 * to context only, and only global functions can be replaced.
7478 * Hence type-check only those types.
7480 if (btf_type_is_int(t1) || btf_is_any_enum(t1))
7482 if (!btf_type_is_ptr(t1)) {
7484 "arg%d in %s() has unrecognized type\n",
7488 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7489 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7490 if (!btf_type_is_struct(t1)) {
7492 "arg%d in %s() is not a pointer to context\n",
7496 if (!btf_type_is_struct(t2)) {
7498 "arg%d in %s() is not a pointer to context\n",
7502 /* This is an optional check to make program writing easier.
7503 * Compare names of structs and report an error to the user.
7504 * btf_prepare_func_args() already checked that t2 struct
7505 * is a context type. btf_prepare_func_args() will check
7506 * later that t1 struct is a context type as well.
7508 s1 = btf_name_by_offset(btf1, t1->name_off);
7509 s2 = btf_name_by_offset(btf2, t2->name_off);
7510 if (strcmp(s1, s2)) {
7512 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
7513 i, fn1, s1, fn2, s2);
7520 /* Compare BTFs of given program with BTF of target program */
7521 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
7522 struct btf *btf2, const struct btf_type *t2)
7524 struct btf *btf1 = prog->aux->btf;
7525 const struct btf_type *t1;
7528 if (!prog->aux->func_info) {
7529 bpf_log(log, "Program extension requires BTF\n");
7533 btf_id = prog->aux->func_info[0].type_id;
7537 t1 = btf_type_by_id(btf1, btf_id);
7538 if (!t1 || !btf_type_is_func(t1))
7541 return btf_check_func_type_match(log, btf1, t1, btf2, t2);
7544 static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
7548 t = btf_type_by_id(btf, t->type); /* skip PTR */
7550 while (btf_type_is_modifier(t))
7551 t = btf_type_by_id(btf, t->type);
7553 /* allow either struct or struct forward declaration */
7554 if (btf_type_is_struct(t) ||
7555 (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
7556 name = btf_str_by_offset(btf, t->name_off);
7557 return name && strcmp(name, "bpf_dynptr") == 0;
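/* For illustration (a sketch, not from the original source): a global
 * subprog argument declared as
 *
 *	int my_subprog(struct bpf_dynptr *ptr);	// hypothetical name
 *
 * is recognized by btf_is_dynptr_ptr() and gets the verifier arg type
 * ARG_PTR_TO_DYNPTR | MEM_RDONLY in btf_prepare_func_args() below.
 */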
7563 struct bpf_cand_cache {
7569 const struct btf *btf;
7574 static DEFINE_MUTEX(cand_cache_mutex);
7576 static struct bpf_cand_cache *
7577 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
7579 static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
7580 const struct btf *btf, const struct btf_type *t)
7582 struct bpf_cand_cache *cc;
7583 struct bpf_core_ctx ctx = {
7587 u32 kern_type_id, type_id;
7590 /* skip PTR and modifiers */
7592 t = btf_type_by_id(btf, t->type);
7593 while (btf_type_is_modifier(t)) {
7595 t = btf_type_by_id(btf, t->type);
7598 mutex_lock(&cand_cache_mutex);
7599 cc = bpf_core_find_cands(&ctx, type_id);
7602 bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7603 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7605 goto cand_cache_unlock;
7608 bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7609 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7610 cc->cnt == 0 ? "has no matches" : "is ambiguous");
7611 err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7612 goto cand_cache_unlock;
7614 if (btf_is_module(cc->cands[0].btf)) {
7615 bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7616 arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7618 goto cand_cache_unlock;
7620 kern_type_id = cc->cands[0].id;
7623 mutex_unlock(&cand_cache_mutex);
7627 return kern_type_id;
7631 ARG_TAG_CTX = BIT_ULL(0),
7632 ARG_TAG_NONNULL = BIT_ULL(1),
7633 ARG_TAG_TRUSTED = BIT_ULL(2),
7634 ARG_TAG_NULLABLE = BIT_ULL(3),
7635 ARG_TAG_ARENA = BIT_ULL(4),
7638 /* Process BTF of a function to produce high-level expectations of function
7639 * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
7640 * is cached in subprog info for reuse.
7642 * EFAULT - there is a verifier bug. Abort verification.
7643 * EINVAL - cannot convert BTF.
7644 * 0 - Successfully processed BTF and constructed argument expectations.
7646 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
7648 bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7649 struct bpf_subprog_info *sub = subprog_info(env, subprog);
7650 struct bpf_verifier_log *log = &env->log;
7651 struct bpf_prog *prog = env->prog;
7652 enum bpf_prog_type prog_type = prog->type;
7653 struct btf *btf = prog->aux->btf;
7654 const struct btf_param *args;
7655 const struct btf_type *t, *ref_t, *fn_t;
7656 u32 i, nargs, btf_id;
7659 if (sub->args_cached)
7662 if (!prog->aux->func_info) {
7663 verifier_bug(env, "func_info undefined");
7667 btf_id = prog->aux->func_info[subprog].type_id;
7669 if (!is_global) /* not fatal for static funcs */
7671 bpf_log(log, "Global functions need valid BTF\n");
7675 fn_t = btf_type_by_id(btf, btf_id);
7676 if (!fn_t || !btf_type_is_func(fn_t)) {
7677 /* These checks were already done by the verifier while loading
7678 * struct bpf_func_info
7680 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7684 tname = btf_name_by_offset(btf, fn_t->name_off);
7686 if (prog->aux->func_info_aux[subprog].unreliable) {
7687 verifier_bug(env, "unreliable BTF for function %s()", tname);
7690 if (prog_type == BPF_PROG_TYPE_EXT)
7691 prog_type = prog->aux->dst_prog->type;
7693 t = btf_type_by_id(btf, fn_t->type);
7694 if (!t || !btf_type_is_func_proto(t)) {
7695 bpf_log(log, "Invalid type of function %s()\n", tname);
7698 args = (const struct btf_param *)(t + 1);
7699 nargs = btf_type_vlen(t);
7700 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
7703 bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7704 tname, nargs, MAX_BPF_FUNC_REG_ARGS);
7707 /* check that function returns int, exception cb also requires this */
7708 t = btf_type_by_id(btf, t->type);
7709 while (btf_type_is_modifier(t))
7710 t = btf_type_by_id(btf, t->type);
7711 if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
7715 "Global function %s() doesn't return scalar. Only those are supported.\n",
7719 /* Convert BTF function arguments into verifier types.
7720 * Only PTR_TO_CTX and SCALAR are supported atm.
7722 for (i = 0; i < nargs; i++) {
7726 /* 'arg:<tag>' decl_tag takes precedence over derivation of
7727 * register type from BTF type itself
7729 while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7730 const struct btf_type *tag_t = btf_type_by_id(btf, id);
7731 const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7733 /* disallow arg tags in static subprogs */
7735 bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7739 if (strcmp(tag, "ctx") == 0) {
7740 tags |= ARG_TAG_CTX;
7741 } else if (strcmp(tag, "trusted") == 0) {
7742 tags |= ARG_TAG_TRUSTED;
7743 } else if (strcmp(tag, "nonnull") == 0) {
7744 tags |= ARG_TAG_NONNULL;
7745 } else if (strcmp(tag, "nullable") == 0) {
7746 tags |= ARG_TAG_NULLABLE;
7747 } else if (strcmp(tag, "arena") == 0) {
7748 tags |= ARG_TAG_ARENA;
7750 bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7754 if (id != -ENOENT) {
7755 bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7759 t = btf_type_by_id(btf, args[i].type);
7760 while (btf_type_is_modifier(t))
7761 t = btf_type_by_id(btf, t->type);
7762 if (!btf_type_is_ptr(t))
7765 if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7766 if (tags & ~ARG_TAG_CTX) {
7767 bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7770 if ((tags & ARG_TAG_CTX) &&
7771 btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7772 prog->expected_attach_type))
7774 sub->args[i].arg_type = ARG_PTR_TO_CTX;
7777 if (btf_is_dynptr_ptr(btf, t)) {
7779 bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7782 sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7785 if (tags & ARG_TAG_TRUSTED) {
7788 if (tags & ARG_TAG_NONNULL) {
7789 bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7793 kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7794 if (kern_type_id < 0)
7795 return kern_type_id;
7797 sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7798 if (tags & ARG_TAG_NULLABLE)
7799 sub->args[i].arg_type |= PTR_MAYBE_NULL;
7800 sub->args[i].btf_id = kern_type_id;
7803 if (tags & ARG_TAG_ARENA) {
7804 if (tags & ~ARG_TAG_ARENA) {
7805 bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7808 sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7811 if (is_global) { /* generic user data pointer */
7814 if (tags & ARG_TAG_NULLABLE) {
7815 bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7819 t = btf_type_skip_modifiers(btf, t->type, NULL);
7820 ref_t = btf_resolve_size(btf, t, &mem_size);
7821 if (IS_ERR(ref_t)) {
7822 bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7823 i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7828 sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7829 if (tags & ARG_TAG_NONNULL)
7830 sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7831 sub->args[i].mem_size = mem_size;
7837 bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
7840 if (btf_type_is_int(t) || btf_is_any_enum(t)) {
7841 sub->args[i].arg_type = ARG_ANYTHING;
7846 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
7847 i, btf_type_str(t), tname);
7851 sub->arg_cnt = nargs;
7852 sub->args_cached = true;
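/* Illustrative sketch (not from the original source): on the BPF side,
 * the "arg:<tag>" decl tags handled above are typically attached with
 * libbpf's bpf_helpers.h macros, e.g.:
 *
 *	int my_subprog(struct task_struct *t __arg_trusted,
 *		       struct __sk_buff *ctx __arg_ctx);
 *
 * where __arg_trusted expands to
 * __attribute__((btf_decl_tag("arg:trusted"))); 't' then becomes
 * ARG_PTR_TO_BTF_ID | PTR_TRUSTED and 'ctx' becomes ARG_PTR_TO_CTX.
 */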
7857 static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
7858 struct btf_show *show)
7860 const struct btf_type *t = btf_type_by_id(btf, type_id);
7863 memset(&show->state, 0, sizeof(show->state));
7864 memset(&show->obj, 0, sizeof(show->obj));
7866 btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
7869 __printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
7872 seq_vprintf((struct seq_file *)show->target, fmt, args);
7875 int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
7876 void *obj, struct seq_file *m, u64 flags)
7878 struct btf_show sseq;
7881 sseq.showfn = btf_seq_show;
7884 btf_type_show(btf, type_id, obj, &sseq);
7886 return sseq.state.status;
7889 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
7892 (void) btf_type_seq_show_flags(btf, type_id, obj, m,
7893 BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
7894 BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
7897 struct btf_show_snprintf {
7898 struct btf_show show;
7899 int len_left; /* space left in string */
7900 int len; /* length we would have written */
7903 __printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
7906 struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
7909 len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7912 ssnprintf->len_left = 0;
7913 ssnprintf->len = len;
7914 } else if (len >= ssnprintf->len_left) {
7915 /* no space, drive on to get length we would have written */
7916 ssnprintf->len_left = 0;
7917 ssnprintf->len += len;
7919 ssnprintf->len_left -= len;
7920 ssnprintf->len += len;
7921 show->target += len;
7925 int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
7926 char *buf, int len, u64 flags)
7928 struct btf_show_snprintf ssnprintf;
7930 ssnprintf.show.target = buf;
7931 ssnprintf.show.flags = flags;
7932 ssnprintf.show.showfn = btf_snprintf_show;
7933 ssnprintf.len_left = len;
7936 btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7938 /* If we encountered an error, return it. */
7939 if (ssnprintf.show.state.status)
7940 return ssnprintf.show.state.status;
7942 /* Otherwise return the length we would have written */
7943 return ssnprintf.len;
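/* Illustrative usage (a sketch, not from the original source): the return
 * value follows snprintf() semantics, so truncation is detectable:
 *
 *	char buf[64];
 *	int len = btf_type_snprintf_show(btf, type_id, obj, buf,
 *					 sizeof(buf), BTF_SHOW_COMPACT);
 *	if (len < 0)
 *		return len;		// error from the show machinery
 *	if (len >= sizeof(buf))
 *		...;			// output was truncated
 */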
7946 #ifdef CONFIG_PROC_FS
7947 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
7949 const struct btf *btf = filp->private_data;
7951 seq_printf(m, "btf_id:\t%u\n", btf->id);
7955 static int btf_release(struct inode *inode, struct file *filp)
7957 btf_put(filp->private_data);
7961 const struct file_operations btf_fops = {
7962 #ifdef CONFIG_PROC_FS
7963 .show_fdinfo = bpf_btf_show_fdinfo,
7965 .release = btf_release,
7968 static int __btf_new_fd(struct btf *btf)
7970 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7973 int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
7978 btf = btf_parse(attr, uattr, uattr_size);
7980 return PTR_ERR(btf);
7982 ret = btf_alloc_id(btf);
7989 * The BTF ID is published to userspace.
7990 * From now on, all BTF frees must go through call_rcu()
7991 * (i.e. free by calling btf_put()).
7994 ret = __btf_new_fd(btf);
8001 struct btf *btf_get_by_fd(int fd)
8006 btf = __btf_get_by_fd(f);
8008 refcount_inc(&btf->refcnt);
8013 int btf_get_info_by_fd(const struct btf *btf,
8014 const union bpf_attr *attr,
8015 union bpf_attr __user *uattr)
8017 struct bpf_btf_info __user *uinfo;
8018 struct bpf_btf_info info;
8019 u32 info_copy, btf_copy;
8022 u32 uinfo_len, uname_len, name_len;
8025 uinfo = u64_to_user_ptr(attr->info.info);
8026 uinfo_len = attr->info.info_len;
8028 info_copy = min_t(u32, uinfo_len, sizeof(info));
8029 memset(&info, 0, sizeof(info));
8030 if (copy_from_user(&info, uinfo, info_copy))
8034 ubtf = u64_to_user_ptr(info.btf);
8035 btf_copy = min_t(u32, btf->data_size, info.btf_size);
8036 if (copy_to_user(ubtf, btf->data, btf_copy))
8038 info.btf_size = btf->data_size;
8040 info.kernel_btf = btf->kernel_btf;
8042 uname = u64_to_user_ptr(info.name);
8043 uname_len = info.name_len;
8044 if (!uname ^ !uname_len)
8047 name_len = strlen(btf->name);
8048 info.name_len = name_len;
8051 if (uname_len >= name_len + 1) {
8052 if (copy_to_user(uname, btf->name, name_len + 1))
8057 if (copy_to_user(uname, btf->name, uname_len - 1))
8059 if (put_user(zero, uname + uname_len - 1))
8061 /* let user-space know the buffer was too short */
8066 if (copy_to_user(uinfo, &info, info_copy) ||
8067 put_user(info_copy, &uattr->info.info_len))
8073 int btf_get_fd_by_id(u32 id)
8079 btf = idr_find(&btf_idr, id);
8080 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8081 btf = ERR_PTR(-ENOENT);
8085 return PTR_ERR(btf);
8087 fd = __btf_new_fd(btf);
8094 u32 btf_obj_id(const struct btf *btf)
8099 bool btf_is_kernel(const struct btf *btf)
8101 return btf->kernel_btf;
8104 bool btf_is_module(const struct btf *btf)
8106 return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8110 BTF_MODULE_F_LIVE = (1 << 0),
8113 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8115 struct list_head list;
8116 struct module *module;
8118 struct bin_attribute *sysfs_attr;
8122 static LIST_HEAD(btf_modules);
8123 static DEFINE_MUTEX(btf_module_mutex);
8125 static void purge_cand_cache(struct btf *btf);
8127 static int btf_module_notify(struct notifier_block *nb, unsigned long op,
8130 struct btf_module *btf_mod, *tmp;
8131 struct module *mod = module;
8135 if (mod->btf_data_size == 0 ||
8136 (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
8137 op != MODULE_STATE_GOING))
8141 case MODULE_STATE_COMING:
8142 btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
8147 btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8148 mod->btf_base_data, mod->btf_base_data_size);
8151 if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
8152 pr_warn("failed to validate module [%s] BTF: %ld\n",
8153 mod->name, PTR_ERR(btf));
8156 pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
8160 err = btf_alloc_id(btf);
8167 purge_cand_cache(NULL);
8168 mutex_lock(&btf_module_mutex);
8169 btf_mod->module = module;
8171 list_add(&btf_mod->list, &btf_modules);
8172 mutex_unlock(&btf_module_mutex);
8174 if (IS_ENABLED(CONFIG_SYSFS)) {
8175 struct bin_attribute *attr;
8177 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
8181 sysfs_bin_attr_init(attr);
8182 attr->attr.name = btf->name;
8183 attr->attr.mode = 0444;
8184 attr->size = btf->data_size;
8185 attr->private = btf->data;
8186 attr->read_new = sysfs_bin_attr_simple_read;
8188 err = sysfs_create_bin_file(btf_kobj, attr);
8190 pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
8197 btf_mod->sysfs_attr = attr;
8201 case MODULE_STATE_LIVE:
8202 mutex_lock(&btf_module_mutex);
8203 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8204 if (btf_mod->module != module)
8207 btf_mod->flags |= BTF_MODULE_F_LIVE;
8210 mutex_unlock(&btf_module_mutex);
8212 case MODULE_STATE_GOING:
8213 mutex_lock(&btf_module_mutex);
8214 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8215 if (btf_mod->module != module)
8218 list_del(&btf_mod->list);
8219 if (btf_mod->sysfs_attr)
8220 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8221 purge_cand_cache(btf_mod->btf);
8222 btf_put(btf_mod->btf);
8223 kfree(btf_mod->sysfs_attr);
8227 mutex_unlock(&btf_module_mutex);
8231 return notifier_from_errno(err);
8234 static struct notifier_block btf_module_nb = {
8235 .notifier_call = btf_module_notify,
8238 static int __init btf_module_init(void)
8240 register_module_notifier(&btf_module_nb);
8244 fs_initcall(btf_module_init);
8245 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
8247 struct module *btf_try_get_module(const struct btf *btf)
8249 struct module *res = NULL;
8250 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8251 struct btf_module *btf_mod, *tmp;
8253 mutex_lock(&btf_module_mutex);
8254 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8255 if (btf_mod->btf != btf)
8258 /* We must only consider a module whose __init routine has
8259 * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
8260 * which is set from the notifier callback for
8261 * MODULE_STATE_LIVE.
8263 if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8264 res = btf_mod->module;
8268 mutex_unlock(&btf_module_mutex);
8274 /* Returns the struct btf corresponding to the given struct module.
8275 * This function can return NULL or an ERR_PTR.
8277 static struct btf *btf_get_module_btf(const struct module *module)
8279 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8280 struct btf_module *btf_mod, *tmp;
8282 struct btf *btf = NULL;
8285 btf = bpf_get_btf_vmlinux();
8286 if (!IS_ERR_OR_NULL(btf))
8291 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8292 mutex_lock(&btf_module_mutex);
8293 list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8294 if (btf_mod->module != module)
8297 btf_get(btf_mod->btf);
8301 mutex_unlock(&btf_module_mutex);
8307 static int check_btf_kconfigs(const struct module *module, const char *feature)
8309 if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
8310 pr_err("missing vmlinux BTF, cannot register %s\n", feature);
8313 if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
8314 pr_warn("missing module BTF, cannot register %s\n", feature);
8318 BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8320 struct btf *btf = NULL;
8327 if (name_sz <= 1 || name[name_sz - 1])
8330 ret = bpf_find_btf_id(name, kind, &btf);
8331 if (ret > 0 && btf_is_module(btf)) {
8332 btf_obj_fd = __btf_new_fd(btf);
8333 if (btf_obj_fd < 0) {
8337 return ret | (((u64)btf_obj_fd) << 32);
8344 const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
8345 .func = bpf_btf_find_by_name_kind,
8347 .ret_type = RET_INTEGER,
8348 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8349 .arg2_type = ARG_CONST_SIZE,
8350 .arg3_type = ARG_ANYTHING,
8351 .arg4_type = ARG_ANYTHING,
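/* For illustration (a sketch, not from the original source): from a
 * BPF_PROG_TYPE_SYSCALL program, a lookup could be decoded like this,
 * since a module BTF fd is packed into the upper 32 bits of the result:
 *
 *	// name_sz (12) counts the NUL terminator of "task_struct"
 *	long ret = bpf_btf_find_by_name_kind("task_struct", 12,
 *					     BTF_KIND_STRUCT, 0);
 *	if (ret > 0) {
 *		u32 btf_id     = (u32)ret;	// BTF type id
 *		int btf_obj_fd = ret >> 32;	// 0 means vmlinux BTF
 *	}
 */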
8354 BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
8355 #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8356 BTF_TRACING_TYPE_xxx
8357 #undef BTF_TRACING_TYPE
8359 /* Validate the well-formedness of an iter argument type.
8360 * On success, return the positive BTF ID of the iter state's STRUCT type.
8361 * On error, return a negative error code.
8363 int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
8365 const struct btf_param *arg;
8366 const struct btf_type *t;
8370 if (btf_type_vlen(func) <= arg_idx)
8373 arg = &btf_params(func)[arg_idx];
8374 t = btf_type_skip_modifiers(btf, arg->type, NULL);
8375 if (!t || !btf_type_is_ptr(t))
8377 t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8378 if (!t || !__btf_type_is_struct(t))
8381 name = btf_name_by_offset(btf, t->name_off);
8382 if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8388 static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
8389 const struct btf_type *func, u32 func_flags)
8391 u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
8392 const char *sfx, *iter_name;
8393 const struct btf_type *t;
8398 /* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
8399 if (!flags || (flags & (flags - 1)))
8402 /* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
8403 nr_args = btf_type_vlen(func);
8407 btf_id = btf_check_iter_arg(btf, func, 0);
8411 /* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
8412 * fit nicely in stack slots
8414 t = btf_type_by_id(btf, btf_id);
8415 if (t->size == 0 || (t->size % 8))
8418 /* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
8421 iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8422 if (flags & KF_ITER_NEW)
8424 else if (flags & KF_ITER_NEXT)
8426 else /* (flags & KF_ITER_DESTROY) */
8429 snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8430 if (strcmp(func_name, exp_name))
8433 /* only iter constructor should have extra arguments */
8434 if (!(flags & KF_ITER_NEW) && nr_args != 1)
8437 if (flags & KF_ITER_NEXT) {
8438 /* bpf_iter_<type>_next() should return pointer */
8439 t = btf_type_skip_modifiers(btf, func->type, NULL);
8440 if (!t || !btf_type_is_ptr(t))
8444 if (flags & KF_ITER_DESTROY) {
8445 /* bpf_iter_<type>_destroy() should return void */
8446 t = btf_type_by_id(btf, func->type);
8447 if (!t || !btf_type_is_void(t))
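/* Illustrative sketch (not from the original source): the checks above
 * encode the iter kfunc contract. E.g. for a "num" iterator:
 *
 *	struct bpf_iter_num { ... };	// size must be a multiple of 8
 *
 *	int  bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
 *	int *bpf_iter_num_next(struct bpf_iter_num *it);    // returns a ptr
 *	void bpf_iter_num_destroy(struct bpf_iter_num *it); // returns void
 *
 * Only the KF_ITER_NEW constructor may take arguments beyond the iter.
 */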
8454 static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
8456 const struct btf_type *func;
8457 const char *func_name;
8460 /* any kfunc should be FUNC -> FUNC_PROTO */
8461 func = btf_type_by_id(btf, func_id);
8462 if (!func || !btf_type_is_func(func))
8465 /* sanity check kfunc name */
8466 func_name = btf_name_by_offset(btf, func->name_off);
8467 if (!func_name || !func_name[0])
8470 func = btf_type_by_id(btf, func->type);
8471 if (!func || !btf_type_is_func_proto(func))
8474 if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
8475 err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
8483 /* Kernel Function (kfunc) BTF ID set registration API */
8485 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8486 const struct btf_kfunc_id_set *kset)
8488 struct btf_kfunc_hook_filter *hook_filter;
8489 struct btf_id_set8 *add_set = kset->set;
8490 bool vmlinux_set = !btf_is_module(btf);
8491 bool add_filter = !!kset->filter;
8492 struct btf_kfunc_set_tab *tab;
8493 struct btf_id_set8 *set;
8497 if (hook >= BTF_KFUNC_HOOK_MAX) {
8505 tab = btf->kfunc_set_tab;
8507 if (tab && add_filter) {
8510 hook_filter = &tab->hook_filters[hook];
8511 for (i = 0; i < hook_filter->nr_filters; i++) {
8512 if (hook_filter->filters[i] == kset->filter) {
8518 if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8525 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8528 btf->kfunc_set_tab = tab;
8531 set = tab->sets[hook];
8532 /* Warn when register_btf_kfunc_id_set is called twice for the same hook from a module; only vmlinux BTF may register multiple sets per hook. */
8535 if (WARN_ON_ONCE(set && !vmlinux_set)) {
8540 /* In case of vmlinux sets, there may be more than one set being
8541 * registered per hook. To create a unified set, we allocate a new set
8542 * and concatenate all individual sets being registered. While each set
8543 * is individually sorted, they may become unsorted when concatenated,
8544 * hence the final set must be re-sorted to make binary
8545 * searching it with the btf_id_set8_contains function work.
8547 * For module sets, we need to allocate as we may need to relocate BTF ids.
8550 set_cnt = set ? set->cnt : 0;
8552 if (set_cnt > U32_MAX - add_set->cnt) {
8557 if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8563 set = krealloc(tab->sets[hook],
8564 struct_size(set, pairs, set_cnt + add_set->cnt),
8565 GFP_KERNEL | __GFP_NOWARN);
8571 /* For newly allocated set, initialize set->cnt to 0 */
8572 if (!tab->sets[hook])
8574 tab->sets[hook] = set;
8576 /* Concatenate the two sets */
8577 memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8578 /* Now that the set is copied, update with relocated BTF ids */
8579 for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8580 set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8582 set->cnt += add_set->cnt;
8584 sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8587 hook_filter = &tab->hook_filters[hook];
8588 hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8592 btf_free_kfunc_set_tab(btf);
8596 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
8597 enum btf_kfunc_hook hook,
8599 const struct bpf_prog *prog)
8601 struct btf_kfunc_hook_filter *hook_filter;
8602 struct btf_id_set8 *set;
8605 if (hook >= BTF_KFUNC_HOOK_MAX)
8607 if (!btf->kfunc_set_tab)
8609 hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8610 for (i = 0; i < hook_filter->nr_filters; i++) {
8611 if (hook_filter->filters[i](prog, kfunc_btf_id))
8614 set = btf->kfunc_set_tab->sets[hook];
8617 id = btf_id_set8_contains(set, kfunc_btf_id);
8620 /* The flags for BTF ID are located next to it */
8624 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8626 switch (prog_type) {
8627 case BPF_PROG_TYPE_UNSPEC:
8628 return BTF_KFUNC_HOOK_COMMON;
8629 case BPF_PROG_TYPE_XDP:
8630 return BTF_KFUNC_HOOK_XDP;
8631 case BPF_PROG_TYPE_SCHED_CLS:
8632 return BTF_KFUNC_HOOK_TC;
8633 case BPF_PROG_TYPE_STRUCT_OPS:
8634 return BTF_KFUNC_HOOK_STRUCT_OPS;
8635 case BPF_PROG_TYPE_TRACING:
8636 case BPF_PROG_TYPE_TRACEPOINT:
8637 case BPF_PROG_TYPE_PERF_EVENT:
8638 case BPF_PROG_TYPE_LSM:
8639 return BTF_KFUNC_HOOK_TRACING;
8640 case BPF_PROG_TYPE_SYSCALL:
8641 return BTF_KFUNC_HOOK_SYSCALL;
8642 case BPF_PROG_TYPE_CGROUP_SKB:
8643 case BPF_PROG_TYPE_CGROUP_SOCK:
8644 case BPF_PROG_TYPE_CGROUP_DEVICE:
8645 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
8646 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
8647 case BPF_PROG_TYPE_CGROUP_SYSCTL:
8648 case BPF_PROG_TYPE_SOCK_OPS:
8649 return BTF_KFUNC_HOOK_CGROUP;
8650 case BPF_PROG_TYPE_SCHED_ACT:
8651 return BTF_KFUNC_HOOK_SCHED_ACT;
8652 case BPF_PROG_TYPE_SK_SKB:
8653 return BTF_KFUNC_HOOK_SK_SKB;
8654 case BPF_PROG_TYPE_SOCKET_FILTER:
8655 return BTF_KFUNC_HOOK_SOCKET_FILTER;
8656 case BPF_PROG_TYPE_LWT_OUT:
8657 case BPF_PROG_TYPE_LWT_IN:
8658 case BPF_PROG_TYPE_LWT_XMIT:
8659 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
8660 return BTF_KFUNC_HOOK_LWT;
8661 case BPF_PROG_TYPE_NETFILTER:
8662 return BTF_KFUNC_HOOK_NETFILTER;
8663 case BPF_PROG_TYPE_KPROBE:
8664 return BTF_KFUNC_HOOK_KPROBE;
8666 return BTF_KFUNC_HOOK_MAX;
8671 * Reference to the module (obtained using btf_try_get_module) corresponding to
8672 * the struct btf *MUST* be held when calling this function from verifier
8673 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
8674 * keeping the reference for the duration of the call provides the necessary
8675 * protection for looking up a well-formed btf->kfunc_set_tab.
8677 u32 *btf_kfunc_id_set_contains(const struct btf *btf,
8679 const struct bpf_prog *prog)
8681 enum bpf_prog_type prog_type = resolve_prog_type(prog);
8682 enum btf_kfunc_hook hook;
8685 kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8689 hook = bpf_prog_type_to_kfunc_hook(prog_type);
8690 return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
8693 u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
8694 const struct bpf_prog *prog)
8696 return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8699 static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8700 const struct btf_kfunc_id_set *kset)
8705 btf = btf_get_module_btf(kset->owner);
8707 return check_btf_kconfigs(kset->owner, "kfunc");
8709 return PTR_ERR(btf);
8711 for (i = 0; i < kset->set->cnt; i++) {
8712 ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8713 kset->set->pairs[i].flags);
8718 ret = btf_populate_kfunc_set(btf, hook, kset);
8725 /* This function must be invoked only from initcalls/module init functions */
8726 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8727 const struct btf_kfunc_id_set *kset)
8729 enum btf_kfunc_hook hook;
8731 /* All kfuncs need to be tagged as such in BTF.
8732 * WARN() for initcall registrations that do not check errors.
8734 if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8735 WARN_ON(!kset->owner);
8739 hook = bpf_prog_type_to_kfunc_hook(prog_type);
8740 return __register_btf_kfunc_id_set(hook, kset);
8742 EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
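/* Illustrative registration sketch (not from the original source; all
 * names are hypothetical). A module or initcall builds the set with the
 * BTF_KFUNCS_* macros, which set the BTF_SET8_KFUNCS flag checked above:
 *
 *	BTF_KFUNCS_START(my_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_my_kfunc, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(my_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &my_kfunc_ids,
 *	};
 *
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *					&my_kfunc_set);
 */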
8744 /* This function must be invoked only from initcalls/module init functions */
8745 int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
8747 return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8749 EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
8751 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
8753 struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8754 struct btf_id_dtor_kfunc *dtor;
8758 /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8759 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
8761 BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
8762 dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8765 return dtor->kfunc_btf_id;
8768 static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8770 const struct btf_type *dtor_func, *dtor_func_proto, *t;
8771 const struct btf_param *args;
8775 for (i = 0; i < cnt; i++) {
8776 dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8778 dtor_func = btf_type_by_id(btf, dtor_btf_id);
8779 if (!dtor_func || !btf_type_is_func(dtor_func))
8782 dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8783 if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8786 /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
8787 t = btf_type_by_id(btf, dtor_func_proto->type);
8788 if (!t || !btf_type_is_void(t))
8791 nr_args = btf_type_vlen(dtor_func_proto);
8794 args = btf_params(dtor_func_proto);
8795 t = btf_type_by_id(btf, args[0].type);
8796 /* Allow any pointer type, as the width will be the same for all
8797 * pointer types on the targets Linux supports (i.e. sizeof(void *))
8799 if (!t || !btf_type_is_ptr(t))
8805 /* This function must be invoked only from initcalls/module init functions */
8806 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
8807 struct module *owner)
8809 struct btf_id_dtor_kfunc_tab *tab;
8814 btf = btf_get_module_btf(owner);
8816 return check_btf_kconfigs(owner, "dtor kfuncs");
8818 return PTR_ERR(btf);
8820 if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8821 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8826 /* Ensure that the prototype of dtor kfuncs being registered is sane */
8827 ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8831 tab = btf->dtor_kfunc_tab;
8832 /* Only one call allowed for modules */
8833 if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
8838 tab_cnt = tab ? tab->cnt : 0;
8839 if (tab_cnt > U32_MAX - add_cnt) {
8843 if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
8844 pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8849 tab = krealloc(btf->dtor_kfunc_tab,
8850 struct_size(tab, dtors, tab_cnt + add_cnt),
8851 GFP_KERNEL | __GFP_NOWARN);
8857 if (!btf->dtor_kfunc_tab)
8859 btf->dtor_kfunc_tab = tab;
8861 memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
8863 /* remap BTF ids based on BTF relocation (if any) */
8864 for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
8865 tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8866 tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8869 tab->cnt += add_cnt;
8871 sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8875 btf_free_dtor_kfunc_tab(btf);
8879 EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
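/* Illustrative registration sketch (not from the original source; all
 * names are hypothetical), mirroring how kptr destructors are wired up:
 *
 *	BTF_ID_LIST(my_dtor_ids)
 *	BTF_ID(struct, my_obj)
 *	BTF_ID(func, my_obj_release)
 *
 *	static int __init my_init(void)
 *	{
 *		const struct btf_id_dtor_kfunc dtors[] = {
 *			{
 *				.btf_id       = my_dtor_ids[0],
 *				.kfunc_btf_id = my_dtor_ids[1],
 *			},
 *		};
 *
 *		return register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors),
 *						   THIS_MODULE);
 *	}
 */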
8881 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
8883 /* Check local and target types for compatibility. This check is used for
8884 * type-based CO-RE relocations and follows slightly different rules than
8885 * field-based relocations. This function assumes that root types were already
8886 * checked for name match. Beyond that initial root-level name check, names
8887 * are completely ignored. Compatibility rules are as follows:
8888 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
8889 * kind should match for local and target types (i.e., STRUCT is not
8890 * compatible with UNION);
8891 * - for ENUMs/ENUM64s, the size is ignored;
8892 * - for INT, size and signedness are ignored;
8893 * - for ARRAY, dimensionality is ignored, element types are checked for
8894 * compatibility recursively;
8895 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
8896 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8897 * - FUNC_PROTOs are compatible if they have compatible signature: same
8898 * number of input args and compatible return and argument types.
8899 * These rules are not set in stone and probably will be adjusted as we get
8900 * more experience with using BPF CO-RE relocations.
8902 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
8903 const struct btf *targ_btf, __u32 targ_id)
8905 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
8906 MAX_TYPES_ARE_COMPAT_DEPTH);
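/* For illustration (a sketch, not from the original source): under these
 * rules a local "struct foo { int x; }" is compatible with a target
 * "struct foo { long x; long y; }", since members are not compared for
 * STRUCTs, but not with a target "union foo" (kind mismatch). For ARRAYs,
 * "int a[4]" is compatible with "long a[10]": dimensionality is ignored
 * and the INT element types match once size and signedness are ignored.
 */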
8909 #define MAX_TYPES_MATCH_DEPTH 2
8911 int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
8912 const struct btf *targ_btf, u32 targ_id)
8914 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8915 MAX_TYPES_MATCH_DEPTH);
8918 static bool bpf_core_is_flavor_sep(const char *s)
8920 /* check X___Y name pattern, where X and Y are not underscores */
8921 return s[0] != '_' && /* X */
8922 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
8923 s[4] != '_'; /* Y */
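/* For illustration (a sketch): CO-RE "flavors" let a BPF program carry
 * several local variants of one kernel type, e.g. "task_struct___old" and
 * "task_struct___new". Both have the essential name "task_struct", so
 * bpf_core_essential_name_len("task_struct___old") == 11.
 */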
8926 size_t bpf_core_essential_name_len(const char *name)
8928 size_t n = strlen(name);
8931 for (i = n - 5; i >= 0; i--) {
8932 if (bpf_core_is_flavor_sep(name + i))
8938 static void bpf_free_cands(struct bpf_cand_cache *cands)
8941 /* empty candidate array was allocated on stack */
8946 static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
8952 #define VMLINUX_CAND_CACHE_SIZE 31
8953 static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
8955 #define MODULE_CAND_CACHE_SIZE 31
8956 static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
8958 static void __print_cand_cache(struct bpf_verifier_log *log,
8959 struct bpf_cand_cache **cache,
8962 struct bpf_cand_cache *cc;
8965 for (i = 0; i < cache_size; i++) {
8969 bpf_log(log, "[%d]%s(", i, cc->name);
8970 for (j = 0; j < cc->cnt; j++) {
8971 bpf_log(log, "%d", cc->cands[j].id);
8972 if (j < cc->cnt - 1)
8975 bpf_log(log, "), ");
8979 static void print_cand_cache(struct bpf_verifier_log *log)
8981 mutex_lock(&cand_cache_mutex);
8982 bpf_log(log, "vmlinux_cand_cache:");
8983 __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8984 bpf_log(log, "\nmodule_cand_cache:");
8985 __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8987 mutex_unlock(&cand_cache_mutex);
8990 static u32 hash_cands(struct bpf_cand_cache *cands)
8992 return jhash(cands->name, cands->name_len, 0);
8995 static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
8996 struct bpf_cand_cache **cache,
8999 struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
9001 if (cc && cc->name_len == cands->name_len &&
9002 !strncmp(cc->name, cands->name, cands->name_len))
9007 static size_t sizeof_cands(int cnt)
9009 return offsetof(struct bpf_cand_cache, cands[cnt]);
9012 static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
9013 struct bpf_cand_cache **cache,
9016 struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
9019 bpf_free_cands_from_cache(*cc);
9022 new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
9024 bpf_free_cands(cands);
9025 return ERR_PTR(-ENOMEM);
9027 /* strdup the name, since it will stay in the cache;
9028 * cands->name points to strings in the prog's BTF and the prog can be unloaded.
9030 new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
9031 bpf_free_cands(cands);
9032 if (!new_cands->name) {
9034 return ERR_PTR(-ENOMEM);
9040 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
9041 static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
9044 struct bpf_cand_cache *cc;
9047 for (i = 0; i < cache_size; i++) {
9052 /* when a new module is loaded, purge all of module_cand_cache,
9053 * since the new module might have candidates with names
9054 * that match cached cands.
9056 bpf_free_cands_from_cache(cc);
9060 /* when a module is unloaded, purge cache entries
9061 * that match the module's btf
9063 for (j = 0; j < cc->cnt; j++)
9064 if (cc->cands[j].btf == btf) {
9065 bpf_free_cands_from_cache(cc);
9073 static void purge_cand_cache(struct btf *btf)
9075 mutex_lock(&cand_cache_mutex);
9076 __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9077 mutex_unlock(&cand_cache_mutex);
9081 static struct bpf_cand_cache *
9082 bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
9085 struct bpf_cand_cache *new_cands;
9086 const struct btf_type *t;
9087 const char *targ_name;
9088 size_t targ_essent_len;
9091 n = btf_nr_types(targ_btf);
9092 for (i = targ_start_id; i < n; i++) {
9093 t = btf_type_by_id(targ_btf, i);
9094 if (btf_kind(t) != cands->kind)
9097 targ_name = btf_name_by_offset(targ_btf, t->name_off);
9101 /* the resched point is before strncmp to make sure that a search
9102 * for a non-existing name gets a chance to schedule().
9106 if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9109 targ_essent_len = bpf_core_essential_name_len(targ_name);
9110 if (targ_essent_len != cands->name_len)
9113 /* most of the time there is only one candidate for a given kind+name pair */
9114 new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
9116 bpf_free_cands(cands);
9117 return ERR_PTR(-ENOMEM);
9120 memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9121 bpf_free_cands(cands);
9123 cands->cands[cands->cnt].btf = targ_btf;
9124 cands->cands[cands->cnt].id = i;
9130 static struct bpf_cand_cache *
9131 bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
9133 struct bpf_cand_cache *cands, *cc, local_cand = {};
9134 const struct btf *local_btf = ctx->btf;
9135 const struct btf_type *local_type;
9136 const struct btf *main_btf;
9137 size_t local_essent_len;
9138 struct btf *mod_btf;
9142 main_btf = bpf_get_btf_vmlinux();
9143 if (IS_ERR(main_btf))
9144 return ERR_CAST(main_btf);
9146 return ERR_PTR(-EINVAL);
9148 local_type = btf_type_by_id(local_btf, local_type_id);
9150 return ERR_PTR(-EINVAL);
9152 name = btf_name_by_offset(local_btf, local_type->name_off);
9153 if (str_is_empty(name))
9154 return ERR_PTR(-EINVAL);
9155 local_essent_len = bpf_core_essential_name_len(name);
9157 cands = &local_cand;
9159 cands->kind = btf_kind(local_type);
9160 cands->name_len = local_essent_len;
9162 cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9163 /* cands is a pointer to stack here */
9170 /* Attempt to find target candidates in vmlinux BTF first */
9171 cands = bpf_core_add_cands(cands, main_btf, 1);
9173 return ERR_CAST(cands);
9175 /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9177 /* populate cache even when cands->cnt == 0 */
9178 cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9180 return ERR_CAST(cc);
9182 /* if vmlinux BTF has any candidate, don't go for module BTFs */
9187 /* cands is a pointer to stack here and cands->cnt == 0 */
9188 cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9190 /* if the cache has it, return it even if cc->cnt == 0 */
9193 /* If the candidate is not found in vmlinux's BTF then search in module BTFs */
9194 spin_lock_bh(&btf_idr_lock);
9195 idr_for_each_entry(&btf_idr, mod_btf, id) {
9196 if (!btf_is_module(mod_btf))
9198 /* linear search could be slow, hence unlock/lock
9199 * the IDR to avoid holding it for too long
9202 spin_unlock_bh(&btf_idr_lock);
9203 cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
9206 return ERR_CAST(cands);
9207 spin_lock_bh(&btf_idr_lock);
9209 spin_unlock_bh(&btf_idr_lock);
9210 /* cands is a pointer to kmalloced memory here if cands->cnt > 0
9211 * or a pointer to the stack if cands->cnt == 0.
9212 * Copy it into the cache even when cands->cnt == 0 and
9213 * return the result.
9215 return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9218 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
9219 int relo_idx, void *insn)
9221 bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9222 struct bpf_core_cand_list cands = {};
9223 struct bpf_core_relo_res targ_res;
9224 struct bpf_core_spec *specs;
9225 const struct btf_type *type;
9228 /* ~4k of temp memory is necessary to convert an LLVM spec like "0:1:0:5"
9229 * into arrays of btf_ids of struct fields and array indices.
9231 specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
9235 type = btf_type_by_id(ctx->btf, relo->type_id);
9237 bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9238 relo_idx, relo->type_id);
9244 struct bpf_cand_cache *cc;
9247 mutex_lock(&cand_cache_mutex);
9248 cc = bpf_core_find_cands(ctx, relo->type_id);
9250 bpf_log(ctx->log, "target candidate search failed for %d\n",
9256 cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
9262 for (i = 0; i < cc->cnt; i++) {
9264 "CO-RE relocating %s %s: found target candidate [%d]\n",
9265 btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9266 cands.cands[i].btf = cc->cands[i].btf;
9267 cands.cands[i].id = cc->cands[i].id;
9269 cands.len = cc->cnt;
9270 /* cand_cache_mutex needs to span the cache lookup and
9271 * the copy of the btf pointer into bpf_core_cand_list,
9272 * since a module can be unloaded while bpf_core_calc_relo_insn
9273 * is working with the module's btf.
9277 err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9282 err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9289 mutex_unlock(&cand_cache_mutex);
9290 if (ctx->log->level & BPF_LOG_LEVEL2)
9291 print_cand_cache(ctx->log);
9296 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
9297 const struct bpf_reg_state *reg,
9298 const char *field_name, u32 btf_id, const char *suffix)
9300 struct btf *btf = reg->btf;
9301 const struct btf_type *walk_type, *safe_type;
9303 char safe_tname[64];
9305 const struct btf_member *member;
9308 walk_type = btf_type_by_id(btf, reg->btf_id);
9312 tname = btf_name_by_offset(btf, walk_type->name_off);
9314 ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
9315 if (ret >= sizeof(safe_tname))
9318 safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9322 safe_type = btf_type_by_id(btf, safe_id);
9326 for_each_member(i, safe_type, member) {
9327 const char *m_name = __btf_name_by_offset(btf, member->name_off);
9328 const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9331 if (!btf_type_is_ptr(mtype))
9334 btf_type_skip_modifiers(btf, mtype->type, &id);
9335 /* If we match on both type and name, the field is considered trusted. */
9336 if (btf_id == id && !strcmp(field_name, m_name))
9343 bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
9344 const struct btf *reg_btf, u32 reg_id,
9345 const struct btf *arg_btf, u32 arg_id)
9347 const char *reg_name, *arg_name, *search_needle;
9348 const struct btf_type *reg_type, *arg_type;
9349 int reg_len, arg_len, cmp_len;
9350 size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9352 reg_type = btf_type_by_id(reg_btf, reg_id);
9356 arg_type = btf_type_by_id(arg_btf, arg_id);
9360 reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9361 arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9363 reg_len = strlen(reg_name);
9364 arg_len = strlen(arg_name);
9366 /* Exactly one of the two type names may be suffixed with ___init, so
9367 * if the strings are the same size, they can't possibly be no-cast
9368 * aliases of one another. If the two names are identical, e.g.
9369 * both are nf_conn___init, it would be improper to return true
9370 * because they are _not_ no-cast aliases; they are the same type.
9372 if (reg_len == arg_len)
9375 /* One of the two names must be the other name suffixed with ___init. */
9376 if ((reg_len != arg_len + pattern_len) &&
9377 (arg_len != reg_len + pattern_len))
9380 if (reg_len < arg_len) {
9381 search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
9384 search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
9391 /* ___init suffix must come at the end of the name */
9392 if (*(search_needle + pattern_len) != '\0')
9395 return !strncmp(reg_name, arg_name, cmp_len);
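/* For illustration (a sketch): with NOCAST_ALIAS_SUFFIX being "___init",
 * "nf_conn" and "nf_conn___init" are no-cast aliases: they describe the
 * same kernel object at different stages, so the verifier must not allow
 * a register typed as one to be implicitly cast to the other. Two
 * identical names (e.g. both "nf_conn___init") are the same type, not
 * aliases.
 */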
9398 #ifdef CONFIG_BPF_JIT
9400 btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
9401 struct bpf_verifier_log *log)
9403 struct btf_struct_ops_tab *tab, *new_tab;
9406 tab = btf->struct_ops_tab;
9408 tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL);
9412 btf->struct_ops_tab = tab;
9415 for (i = 0; i < tab->cnt; i++)
9416 if (tab->ops[i].st_ops == st_ops)
9419 if (tab->cnt == tab->capacity) {
9420 new_tab = krealloc(tab,
9421 struct_size(tab, ops, tab->capacity * 2),
9427 btf->struct_ops_tab = tab;
9430 tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9432 err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9436 btf->struct_ops_tab->cnt++;
9441 const struct bpf_struct_ops_desc *
9442 bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
9444 const struct bpf_struct_ops_desc *st_ops_list;
9450 if (!btf->struct_ops_tab)
9453 cnt = btf->struct_ops_tab->cnt;
9454 st_ops_list = btf->struct_ops_tab->ops;
9455 for (i = 0; i < cnt; i++) {
9456 if (st_ops_list[i].value_id == value_id)
9457 return &st_ops_list[i];
9463 const struct bpf_struct_ops_desc *
9464 bpf_struct_ops_find(struct btf *btf, u32 type_id)
9466 const struct bpf_struct_ops_desc *st_ops_list;
9472 if (!btf->struct_ops_tab)
9475 cnt = btf->struct_ops_tab->cnt;
9476 st_ops_list = btf->struct_ops_tab->ops;
9477 for (i = 0; i < cnt; i++) {
9478 if (st_ops_list[i].type_id == type_id)
9479 return &st_ops_list[i];
9485 int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
9487 struct bpf_verifier_log *log;
9491 btf = btf_get_module_btf(st_ops->owner);
9493 return check_btf_kconfigs(st_ops->owner, "struct_ops");
9495 return PTR_ERR(btf);
9497 log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
9503 log->level = BPF_LOG_KERNEL;
9505 err = btf_add_struct_ops(btf, st_ops, log);
9513 EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
9516 bool btf_param_match_suffix(const struct btf *btf,
9517 const struct btf_param *arg,
9520 int suffix_len = strlen(suffix), len;
9521 const char *param_name;
9523 /* In the future, this can be ported to use BTF tagging */
9524 param_name = btf_name_by_offset(btf, arg->name_off);
9525 if (str_is_empty(param_name))
9527 len = strlen(param_name);
9528 if (len <= suffix_len)
9530 param_name += len - suffix_len;
9531 return !strncmp(param_name, suffix, suffix_len);
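/* Illustrative usage (a sketch, not from the original source): kfunc
 * calling conventions are encoded in parameter-name suffixes. For a
 * hypothetical kfunc
 *
 *	void *bpf_my_kfunc(void *mem, u32 mem__sz);
 *
 * the size argument is detected with
 * btf_param_match_suffix(btf, arg, "__sz").
 */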