1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018 Facebook */
3
4#include <uapi/linux/btf.h>
5#include <uapi/linux/types.h>
6#include <linux/seq_file.h>
7#include <linux/compiler.h>
8#include <linux/errno.h>
9#include <linux/slab.h>
10#include <linux/anon_inodes.h>
11#include <linux/file.h>
12#include <linux/uaccess.h>
13#include <linux/kernel.h>
14#include <linux/idr.h>
15#include <linux/bpf_verifier.h>
16#include <linux/btf.h>
17
18/* BTF (BPF Type Format) is the metadata format which describes
19 * the data types of BPF programs/maps. Hence, it basically focuses
20 * on the C programming language, which modern BPF is primarily
21 * using.
22 *
23 * ELF Section:
24 * ~~~~~~~~~~~
25 * The BTF data is stored under the ".BTF" ELF section
26 *
27 * struct btf_type:
28 * ~~~~~~~~~~~~~~~
29 * Each 'struct btf_type' object describes a C data type.
30 * Depending on the type it is describing, a 'struct btf_type'
31 * object may be followed by more data, e.g.
32 * to describe an array, 'struct btf_type' is followed by
33 * 'struct btf_array'.
34 *
35 * 'struct btf_type' and any extra data following it are
36 * 4-byte aligned.
37 *
38 * Type section:
39 * ~~~~~~~~~~~~~
40 * The BTF type section contains a list of 'struct btf_type' objects.
41 * Each one describes a C type. Recall from the above section
42 * that a 'struct btf_type' object could be immediately followed by extra
43 * data in order to describe some particular C types.
44 *
45 * type_id:
46 * ~~~~~~~
47 * Each btf_type object is identified by a type_id. The type_id
48 * is implied by the location of the btf_type object in
49 * the BTF type section. The first one has type_id 1. The second
50 * one has type_id 2...etc. Hence, an earlier btf_type has
51 * a smaller type_id.
52 *
53 * A btf_type object may refer to another btf_type object by using
54 * type_id (i.e. the "type" in the "struct btf_type").
55 *
56 * NOTE that we cannot assume any reference-order.
57 * A btf_type object can refer to an earlier btf_type object
58 * but it can also refer to a later btf_type object.
59 *
60 * For example, to describe "const void *". A btf_type
61 * object describing "const" may refer to another btf_type
62 * object describing "void *". This type-reference is done
63 * by specifying type_id:
64 *
65 * [1] CONST (anon) type_id=2
66 * [2] PTR (anon) type_id=0
67 *
68 * The above is the btf_verifier debug log:
69 * - Each line started with "[?]" is a btf_type object
70 * - [?] is the type_id of the btf_type object.
71 * - CONST/PTR is the BTF_KIND_XXX
72 * - "(anon)" is the name of the type. It just
73 * happens that CONST and PTR have no name.
74 * - type_id=XXX is the 'u32 type' in btf_type
75 *
76 * NOTE: "void" has type_id 0
77 *
78 * String section:
79 * ~~~~~~~~~~~~~~
80 * The BTF string section contains the names used by the type section.
81 * Each string is referred by an "offset" from the beginning of the
82 * string section.
83 *
84 * Each string is '\0' terminated.
85 *
86 * The first character in the string section must be '\0'
87 * which is used to mean 'anonymous'. Some btf_type may not
88 * have a name.
89 */
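/* For example, an array "int a[10]" needs two btf_type objects:
 * a BTF_KIND_INT describing "int" and a BTF_KIND_ARRAY whose
 * trailing 'struct btf_array' records the element type (the INT's
 * type_id), an index type and nelems (10 here).
 */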
90
91/* BTF verification:
92 *
93 * To verify BTF data, two passes are needed.
94 *
95 * Pass #1
96 * ~~~~~~~
97 * The first pass is to collect all btf_type objects to
98 * an array: "btf->types".
99 *
100 * Depending on the C type that a btf_type is describing,
101 * a btf_type may be followed by extra data. We don't know
102 * how many btf_types there are, and more importantly we don't
103 * know where each btf_type is located in the type section.
104 *
105 * Without knowing the location of each type_id, most verifications
106 * cannot be done. e.g. an earlier btf_type may refer to a later
107 * btf_type (recall the "const void *" above), so we cannot
108 * check this type-reference in the first pass.
109 *
110 * The first pass still does some verification (e.g.
111 * checking that the name is a valid offset into the string section).
112 *
113 * Pass #2
114 * ~~~~~~~
115 * The main focus is to resolve a btf_type that is referring
116 * to another type.
117 *
118 * We have to ensure the referring type:
119 * 1) does exist in the BTF (i.e. in btf->types[])
120 * 2) does not cause a loop:
121 * struct A {
122 * struct B b;
123 * };
124 *
125 * struct B {
126 * struct A a;
127 * };
128 *
129 * btf_type_needs_resolve() decides if a btf_type needs
130 * to be resolved.
131 *
132 * The needs_resolve type implements the "resolve()" ops which
133 * essentially does a DFS and detects backedges.
134 *
135 * During resolve (or DFS), different C types have different
136 * "RESOLVED" conditions.
137 *
138 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
139 * members because a member is always referring to another
140 * type. A struct's member can be treated as "RESOLVED" if
141 * it is referring to a BTF_KIND_PTR. Otherwise, the
142 * following valid C struct would be rejected:
143 *
144 * struct A {
145 * int m;
146 * struct A *a;
147 * };
148 *
149 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
150 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
151 * detect a pointer loop, e.g.:
152 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
153 * ^ |
154 * +-----------------------------------------+
155 *
156 */
157
158#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
159#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
160#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
161#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
162#define BITS_ROUNDUP_BYTES(bits) \
163 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
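/* e.g. BITS_ROUNDUP_BYTES(8) == 1, BITS_ROUNDUP_BYTES(9) == 2 */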
164
165/* 16MB for 64k structs, each with 16 members, and
166 * a few MB of space for the string section.
167 * The hard limit is S32_MAX.
168 */
169#define BTF_MAX_SIZE (16 * 1024 * 1024)
170/* 64k. We can raise it later. The hard limit is S32_MAX. */
171#define BTF_MAX_NR_TYPES 65535
172
173#define for_each_member(i, struct_type, member) \
174 for (i = 0, member = btf_type_member(struct_type); \
175 i < btf_type_vlen(struct_type); \
176 i++, member++)
177
178#define for_each_member_from(i, from, struct_type, member) \
179 for (i = from, member = btf_type_member(struct_type) + from; \
180 i < btf_type_vlen(struct_type); \
181 i++, member++)
182
183static DEFINE_IDR(btf_idr);
184static DEFINE_SPINLOCK(btf_idr_lock);
185
186struct btf {
187 union {
188 struct btf_header *hdr;
189 void *data;
190 };
191 struct btf_type **types;
192 u32 *resolved_ids;
193 u32 *resolved_sizes;
194 const char *strings;
195 void *nohdr_data;
196 u32 nr_types;
197 u32 types_size;
198 u32 data_size;
199	refcount_t refcnt;
200 u32 id;
201 struct rcu_head rcu;
202};
203
204enum verifier_phase {
205 CHECK_META,
206 CHECK_TYPE,
207};
208
209struct resolve_vertex {
210 const struct btf_type *t;
211 u32 type_id;
212 u16 next_member;
213};
214
215enum visit_state {
216 NOT_VISITED,
217 VISITED,
218 RESOLVED,
219};
220
221enum resolve_mode {
222 RESOLVE_TBD, /* To Be Determined */
223 RESOLVE_PTR, /* Resolving for Pointer */
224 RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
225 * or array
226 */
227};
228
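/* Max depth of the resolve stack in struct btf_verifier_env. A
 * reference chain deeper than this fails with -E2BIG
 * ("Exceeded max resolving depth").
 */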
229#define MAX_RESOLVE_DEPTH 32
230
231struct btf_verifier_env {
232 struct btf *btf;
233 u8 *visit_states;
234 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
235 struct bpf_verifier_log log;
236 u32 log_type_id;
237 u32 top_stack;
238 enum verifier_phase phase;
239 enum resolve_mode resolve_mode;
240};
241
242static const char * const btf_kind_str[NR_BTF_KINDS] = {
243 [BTF_KIND_UNKN] = "UNKNOWN",
244 [BTF_KIND_INT] = "INT",
245 [BTF_KIND_PTR] = "PTR",
246 [BTF_KIND_ARRAY] = "ARRAY",
247 [BTF_KIND_STRUCT] = "STRUCT",
248 [BTF_KIND_UNION] = "UNION",
249 [BTF_KIND_ENUM] = "ENUM",
250 [BTF_KIND_FWD] = "FWD",
251 [BTF_KIND_TYPEDEF] = "TYPEDEF",
252 [BTF_KIND_VOLATILE] = "VOLATILE",
253 [BTF_KIND_CONST] = "CONST",
254 [BTF_KIND_RESTRICT] = "RESTRICT",
255};
256
257struct btf_kind_operations {
258 s32 (*check_meta)(struct btf_verifier_env *env,
259 const struct btf_type *t,
260 u32 meta_left);
261 int (*resolve)(struct btf_verifier_env *env,
262 const struct resolve_vertex *v);
263 int (*check_member)(struct btf_verifier_env *env,
264 const struct btf_type *struct_type,
265 const struct btf_member *member,
266 const struct btf_type *member_type);
267 void (*log_details)(struct btf_verifier_env *env,
268 const struct btf_type *t);
269 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
270 u32 type_id, void *data, u8 bits_offsets,
271 struct seq_file *m);
272};
273
274static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
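/* btf_void backs type_id 0 ("void"); it always sits in btf->types[0]
 * and is not counted in btf->nr_types.
 */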
275static struct btf_type btf_void;
276
277static bool btf_type_is_modifier(const struct btf_type *t)
278{
279	/* Some of them are not strictly C modifiers
280	 * but they are grouped into the same bucket
281	 * as far as BTF is concerned:
282 * A type (t) that refers to another
283 * type through t->type AND its size cannot
284 * be determined without following the t->type.
285 *
286 * ptr does not fall into this bucket
287 * because its size is always sizeof(void *).
288 */
289 switch (BTF_INFO_KIND(t->info)) {
290 case BTF_KIND_TYPEDEF:
291 case BTF_KIND_VOLATILE:
292 case BTF_KIND_CONST:
293 case BTF_KIND_RESTRICT:
294 return true;
295 }
296
297 return false;
298}
299
300static bool btf_type_is_void(const struct btf_type *t)
301{
302 /* void => no type and size info.
303 * Hence, FWD is also treated as void.
304 */
305 return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
306}
307
308static bool btf_type_is_void_or_null(const struct btf_type *t)
309{
310 return !t || btf_type_is_void(t);
311}
312
313/* union is only a special case of struct:
314 * all its offsetof(member) == 0
315 */
316static bool btf_type_is_struct(const struct btf_type *t)
317{
318 u8 kind = BTF_INFO_KIND(t->info);
319
320 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
321}
322
323static bool btf_type_is_array(const struct btf_type *t)
324{
325 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
326}
327
328static bool btf_type_is_ptr(const struct btf_type *t)
329{
330 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
331}
332
333static bool btf_type_is_int(const struct btf_type *t)
334{
335 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
336}
337
338/* What types need to be resolved?
339 *
340 * btf_type_is_modifier() is an obvious one.
341 *
342 * btf_type_is_struct() because its member refers to
343 * another type (through member->type).
344 *
345 * btf_type_is_array() because its element (array->type)
346 * refers to another type. An array can be thought of as a
347 * special case of a struct where the same member-type is
348 * repeated array->nelems times.
349 */
350static bool btf_type_needs_resolve(const struct btf_type *t)
351{
352 return btf_type_is_modifier(t) ||
353 btf_type_is_ptr(t) ||
354 btf_type_is_struct(t) ||
355 btf_type_is_array(t);
356}
357
358/* t->size can be used */
359static bool btf_type_has_size(const struct btf_type *t)
360{
361 switch (BTF_INFO_KIND(t->info)) {
362 case BTF_KIND_INT:
363 case BTF_KIND_STRUCT:
364 case BTF_KIND_UNION:
365 case BTF_KIND_ENUM:
366 return true;
367 }
368
369 return false;
370}
371
372static const char *btf_int_encoding_str(u8 encoding)
373{
374 if (encoding == 0)
375 return "(none)";
376 else if (encoding == BTF_INT_SIGNED)
377 return "SIGNED";
378 else if (encoding == BTF_INT_CHAR)
379 return "CHAR";
380 else if (encoding == BTF_INT_BOOL)
381 return "BOOL";
382 else if (encoding == BTF_INT_VARARGS)
383 return "VARARGS";
384 else
385 return "UNKN";
386}
387
388static u16 btf_type_vlen(const struct btf_type *t)
389{
390 return BTF_INFO_VLEN(t->info);
391}
392
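/* The u32 immediately following a BTF_KIND_INT btf_type encodes
 * BTF_INT_ENCODING(), BTF_INT_OFFSET() and BTF_INT_BITS().
 */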
393static u32 btf_type_int(const struct btf_type *t)
394{
395 return *(u32 *)(t + 1);
396}
397
398static const struct btf_array *btf_type_array(const struct btf_type *t)
399{
400 return (const struct btf_array *)(t + 1);
401}
402
403static const struct btf_member *btf_type_member(const struct btf_type *t)
404{
405 return (const struct btf_member *)(t + 1);
406}
407
408static const struct btf_enum *btf_type_enum(const struct btf_type *t)
409{
410 return (const struct btf_enum *)(t + 1);
411}
412
413static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
414{
415 return kind_ops[BTF_INFO_KIND(t->info)];
416}
417
418static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
419{
420 return !BTF_STR_TBL_ELF_ID(offset) &&
421 BTF_STR_OFFSET(offset) < btf->hdr->str_len;
422}
423
424static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
425{
426 if (!BTF_STR_OFFSET(offset))
427 return "(anon)";
428 else if (BTF_STR_OFFSET(offset) < btf->hdr->str_len)
429 return &btf->strings[BTF_STR_OFFSET(offset)];
430 else
431 return "(invalid-name-offset)";
432}
433
434static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
435{
436 if (type_id > btf->nr_types)
437 return NULL;
438
439 return btf->types[type_id];
440}
441
442__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
443 const char *fmt, ...)
444{
445 va_list args;
446
447 va_start(args, fmt);
448 bpf_verifier_vlog(log, fmt, args);
449 va_end(args);
450}
451
452__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
453 const char *fmt, ...)
454{
455 struct bpf_verifier_log *log = &env->log;
456 va_list args;
457
458 if (!bpf_verifier_log_needed(log))
459 return;
460
461 va_start(args, fmt);
462 bpf_verifier_vlog(log, fmt, args);
463 va_end(args);
464}
465
466__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
467 const struct btf_type *t,
468 bool log_details,
469 const char *fmt, ...)
470{
471 struct bpf_verifier_log *log = &env->log;
472 u8 kind = BTF_INFO_KIND(t->info);
473 struct btf *btf = env->btf;
474 va_list args;
475
476 if (!bpf_verifier_log_needed(log))
477 return;
478
479 __btf_verifier_log(log, "[%u] %s %s%s",
480 env->log_type_id,
481 btf_kind_str[kind],
482			   btf_name_by_offset(btf, t->name_off),
483 log_details ? " " : "");
484
485 if (log_details)
486 btf_type_ops(t)->log_details(env, t);
487
488 if (fmt && *fmt) {
489 __btf_verifier_log(log, " ");
490 va_start(args, fmt);
491 bpf_verifier_vlog(log, fmt, args);
492 va_end(args);
493 }
494
495 __btf_verifier_log(log, "\n");
496}
497
498#define btf_verifier_log_type(env, t, ...) \
499 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
500#define btf_verifier_log_basic(env, t, ...) \
501 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
502
503__printf(4, 5)
504static void btf_verifier_log_member(struct btf_verifier_env *env,
505 const struct btf_type *struct_type,
506 const struct btf_member *member,
507 const char *fmt, ...)
508{
509 struct bpf_verifier_log *log = &env->log;
510 struct btf *btf = env->btf;
511 va_list args;
512
513 if (!bpf_verifier_log_needed(log))
514 return;
515
516 /* The CHECK_META phase already did a btf dump.
517 *
518	 * If the member is logged again, it must have hit an error
519	 * while parsing this member. It is useful to print out which
520 * struct this member belongs to.
521 */
522 if (env->phase != CHECK_META)
523 btf_verifier_log_type(env, struct_type, NULL);
524
525	__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
526			   btf_name_by_offset(btf, member->name_off),
527 member->type, member->offset);
528
529 if (fmt && *fmt) {
530 __btf_verifier_log(log, " ");
531 va_start(args, fmt);
532 bpf_verifier_vlog(log, fmt, args);
533 va_end(args);
534 }
535
536 __btf_verifier_log(log, "\n");
537}
538
539static void btf_verifier_log_hdr(struct btf_verifier_env *env)
540{
541 struct bpf_verifier_log *log = &env->log;
542 const struct btf *btf = env->btf;
543 const struct btf_header *hdr;
544
545 if (!bpf_verifier_log_needed(log))
546 return;
547
548 hdr = btf->hdr;
549 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
550 __btf_verifier_log(log, "version: %u\n", hdr->version);
551 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
552 __btf_verifier_log(log, "parent_label: %u\n", hdr->parent_label);
553 __btf_verifier_log(log, "parent_name: %u\n", hdr->parent_name);
554 __btf_verifier_log(log, "label_off: %u\n", hdr->label_off);
555 __btf_verifier_log(log, "object_off: %u\n", hdr->object_off);
556 __btf_verifier_log(log, "func_off: %u\n", hdr->func_off);
557 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
558 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
559 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
560 __btf_verifier_log(log, "btf_total_size: %u\n", btf->data_size);
561}
562
563static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
564{
565 struct btf *btf = env->btf;
566
567 /* < 2 because +1 for btf_void which is always in btf->types[0].
568	 * btf_void is not accounted for in btf->nr_types because it
569	 * does not come from the BTF file.
570 */
571 if (btf->types_size - btf->nr_types < 2) {
572 /* Expand 'types' array */
573
574 struct btf_type **new_types;
575 u32 expand_by, new_size;
576
577 if (btf->types_size == BTF_MAX_NR_TYPES) {
578 btf_verifier_log(env, "Exceeded max num of types");
579 return -E2BIG;
580 }
581
582 expand_by = max_t(u32, btf->types_size >> 2, 16);
583 new_size = min_t(u32, BTF_MAX_NR_TYPES,
584 btf->types_size + expand_by);
585
586 new_types = kvzalloc(new_size * sizeof(*new_types),
587 GFP_KERNEL | __GFP_NOWARN);
588 if (!new_types)
589 return -ENOMEM;
590
591 if (btf->nr_types == 0)
592 new_types[0] = &btf_void;
593 else
594 memcpy(new_types, btf->types,
595 sizeof(*btf->types) * (btf->nr_types + 1));
596
597 kvfree(btf->types);
598 btf->types = new_types;
599 btf->types_size = new_size;
600 }
601
602 btf->types[++(btf->nr_types)] = t;
603
604 return 0;
605}
606
607static int btf_alloc_id(struct btf *btf)
608{
609 int id;
610
611 idr_preload(GFP_KERNEL);
612 spin_lock_bh(&btf_idr_lock);
613 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
614 if (id > 0)
615 btf->id = id;
616 spin_unlock_bh(&btf_idr_lock);
617 idr_preload_end();
618
619 if (WARN_ON_ONCE(!id))
620 return -ENOSPC;
621
622 return id > 0 ? 0 : id;
623}
624
625static void btf_free_id(struct btf *btf)
626{
627 unsigned long flags;
628
629 /*
630	 * In map-in-map, calling map_delete_elem() on the outer
631	 * map will call bpf_map_put() on the inner map.
632	 * It will then eventually call btf_free_id()
633	 * on the inner map. Some map_delete_elem()
634	 * implementations may run with IRQs disabled, so
635 * we need to use the _irqsave() version instead
636 * of the _bh() version.
637 */
638 spin_lock_irqsave(&btf_idr_lock, flags);
639 idr_remove(&btf_idr, btf->id);
640 spin_unlock_irqrestore(&btf_idr_lock, flags);
641}
642
643static void btf_free(struct btf *btf)
644{
645 kvfree(btf->types);
646 kvfree(btf->resolved_sizes);
647 kvfree(btf->resolved_ids);
648 kvfree(btf->data);
649 kfree(btf);
650}
651
652static void btf_free_rcu(struct rcu_head *rcu)
653{
654 struct btf *btf = container_of(rcu, struct btf, rcu);
655
656 btf_free(btf);
657}
658
659void btf_put(struct btf *btf)
660{
661 if (btf && refcount_dec_and_test(&btf->refcnt)) {
662 btf_free_id(btf);
663 call_rcu(&btf->rcu, btf_free_rcu);
664 }
665}
666
667static int env_resolve_init(struct btf_verifier_env *env)
668{
669 struct btf *btf = env->btf;
670 u32 nr_types = btf->nr_types;
671 u32 *resolved_sizes = NULL;
672 u32 *resolved_ids = NULL;
673 u8 *visit_states = NULL;
674
675 /* +1 for btf_void */
676 resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
677 GFP_KERNEL | __GFP_NOWARN);
678 if (!resolved_sizes)
679 goto nomem;
680
681 resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
682 GFP_KERNEL | __GFP_NOWARN);
683 if (!resolved_ids)
684 goto nomem;
685
686 visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
687 GFP_KERNEL | __GFP_NOWARN);
688 if (!visit_states)
689 goto nomem;
690
691 btf->resolved_sizes = resolved_sizes;
692 btf->resolved_ids = resolved_ids;
693 env->visit_states = visit_states;
694
695 return 0;
696
697nomem:
698 kvfree(resolved_sizes);
699 kvfree(resolved_ids);
700 kvfree(visit_states);
701 return -ENOMEM;
702}
703
704static void btf_verifier_env_free(struct btf_verifier_env *env)
705{
706	kvfree(env->visit_states);
707 kfree(env);
708}
709
710static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
711 const struct btf_type *next_type)
712{
713 switch (env->resolve_mode) {
714 case RESOLVE_TBD:
715 /* int, enum or void is a sink */
716 return !btf_type_needs_resolve(next_type);
717 case RESOLVE_PTR:
718 /* int, enum, void, struct or array is a sink for ptr */
719 return !btf_type_is_modifier(next_type) &&
720 !btf_type_is_ptr(next_type);
721 case RESOLVE_STRUCT_OR_ARRAY:
722 /* int, enum, void or ptr is a sink for struct and array */
723 return !btf_type_is_modifier(next_type) &&
724 !btf_type_is_array(next_type) &&
725 !btf_type_is_struct(next_type);
726 default:
727 BUG_ON(1);
728 }
729}
730
731static bool env_type_is_resolved(const struct btf_verifier_env *env,
732 u32 type_id)
733{
734 return env->visit_states[type_id] == RESOLVED;
735}
736
737static int env_stack_push(struct btf_verifier_env *env,
738 const struct btf_type *t, u32 type_id)
739{
740 struct resolve_vertex *v;
741
742 if (env->top_stack == MAX_RESOLVE_DEPTH)
743 return -E2BIG;
744
745 if (env->visit_states[type_id] != NOT_VISITED)
746 return -EEXIST;
747
748 env->visit_states[type_id] = VISITED;
749
750 v = &env->stack[env->top_stack++];
751 v->t = t;
752 v->type_id = type_id;
753 v->next_member = 0;
754
755 if (env->resolve_mode == RESOLVE_TBD) {
756 if (btf_type_is_ptr(t))
757 env->resolve_mode = RESOLVE_PTR;
758 else if (btf_type_is_struct(t) || btf_type_is_array(t))
759 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
760 }
761
762 return 0;
763}
764
765static void env_stack_set_next_member(struct btf_verifier_env *env,
766 u16 next_member)
767{
768 env->stack[env->top_stack - 1].next_member = next_member;
769}
770
771static void env_stack_pop_resolved(struct btf_verifier_env *env,
772 u32 resolved_type_id,
773 u32 resolved_size)
774{
775 u32 type_id = env->stack[--(env->top_stack)].type_id;
776 struct btf *btf = env->btf;
777
778 btf->resolved_sizes[type_id] = resolved_size;
779 btf->resolved_ids[type_id] = resolved_type_id;
780 env->visit_states[type_id] = RESOLVED;
781}
782
783static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
784{
785 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
786}
787
788/* The input param "type_id" must point to a needs_resolve type */
789static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
790 u32 *type_id)
791{
792 *type_id = btf->resolved_ids[*type_id];
793 return btf_type_by_id(btf, *type_id);
794}
795
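/* Follow *type_id (through modifiers, using the resolved_ids/resolved_sizes
 * stored during pass #2) to the type that determines the size.
 * On success, *type_id is updated to that type's id and *ret_size
 * (if non-NULL) to its byte size. Returns NULL for void/fwd.
 */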
796const struct btf_type *btf_type_id_size(const struct btf *btf,
797 u32 *type_id, u32 *ret_size)
798{
799 const struct btf_type *size_type;
800 u32 size_type_id = *type_id;
801 u32 size = 0;
802
803 size_type = btf_type_by_id(btf, size_type_id);
804 if (btf_type_is_void_or_null(size_type))
805 return NULL;
806
807 if (btf_type_has_size(size_type)) {
808 size = size_type->size;
809 } else if (btf_type_is_array(size_type)) {
810 size = btf->resolved_sizes[size_type_id];
811 } else if (btf_type_is_ptr(size_type)) {
812 size = sizeof(void *);
813 } else {
814 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
815 return NULL;
816
817 size = btf->resolved_sizes[size_type_id];
818 size_type_id = btf->resolved_ids[size_type_id];
819 size_type = btf_type_by_id(btf, size_type_id);
820 if (btf_type_is_void(size_type))
821 return NULL;
822 }
823
824 *type_id = size_type_id;
825 if (ret_size)
826 *ret_size = size;
827
828 return size_type;
829}
830
831static int btf_df_check_member(struct btf_verifier_env *env,
832 const struct btf_type *struct_type,
833 const struct btf_member *member,
834 const struct btf_type *member_type)
835{
836 btf_verifier_log_basic(env, struct_type,
837 "Unsupported check_member");
838 return -EINVAL;
839}
840
841static int btf_df_resolve(struct btf_verifier_env *env,
842 const struct resolve_vertex *v)
843{
844 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
845 return -EINVAL;
846}
847
848static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
849 u32 type_id, void *data, u8 bits_offsets,
850 struct seq_file *m)
851{
852 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
853}
854
855static int btf_int_check_member(struct btf_verifier_env *env,
856 const struct btf_type *struct_type,
857 const struct btf_member *member,
858 const struct btf_type *member_type)
859{
860 u32 int_data = btf_type_int(member_type);
861 u32 struct_bits_off = member->offset;
862 u32 struct_size = struct_type->size;
863 u32 nr_copy_bits;
864 u32 bytes_offset;
865
866 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
867 btf_verifier_log_member(env, struct_type, member,
868 "bits_offset exceeds U32_MAX");
869 return -EINVAL;
870 }
871
872 struct_bits_off += BTF_INT_OFFSET(int_data);
873 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
874 nr_copy_bits = BTF_INT_BITS(int_data) +
875 BITS_PER_BYTE_MASKED(struct_bits_off);
876
877 if (nr_copy_bits > BITS_PER_U64) {
878 btf_verifier_log_member(env, struct_type, member,
879 "nr_copy_bits exceeds 64");
880 return -EINVAL;
881 }
882
883 if (struct_size < bytes_offset ||
884 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
885 btf_verifier_log_member(env, struct_type, member,
886 "Member exceeds struct_size");
887 return -EINVAL;
888 }
889
890 return 0;
891}
892
893static s32 btf_int_check_meta(struct btf_verifier_env *env,
894 const struct btf_type *t,
895 u32 meta_left)
896{
897 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
898 u16 encoding;
899
900 if (meta_left < meta_needed) {
901 btf_verifier_log_basic(env, t,
902 "meta_left:%u meta_needed:%u",
903 meta_left, meta_needed);
904 return -EINVAL;
905 }
906
907 if (btf_type_vlen(t)) {
908 btf_verifier_log_type(env, t, "vlen != 0");
909 return -EINVAL;
910 }
911
912 int_data = btf_type_int(t);
913 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
914
915 if (nr_bits > BITS_PER_U64) {
916 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
917 BITS_PER_U64);
918 return -EINVAL;
919 }
920
921 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
922 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
923 return -EINVAL;
924 }
925
926 encoding = BTF_INT_ENCODING(int_data);
927 if (encoding &&
928 encoding != BTF_INT_SIGNED &&
929 encoding != BTF_INT_CHAR &&
930 encoding != BTF_INT_BOOL &&
931 encoding != BTF_INT_VARARGS) {
932 btf_verifier_log_type(env, t, "Unsupported encoding");
933 return -ENOTSUPP;
934 }
935
936 btf_verifier_log_type(env, t, NULL);
937
938 return meta_needed;
939}
940
941static void btf_int_log(struct btf_verifier_env *env,
942 const struct btf_type *t)
943{
944 int int_data = btf_type_int(t);
945
946 btf_verifier_log(env,
947 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
948 t->size, BTF_INT_OFFSET(int_data),
949 BTF_INT_BITS(int_data),
950 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
951}
952
953static void btf_int_bits_seq_show(const struct btf *btf,
954 const struct btf_type *t,
955 void *data, u8 bits_offset,
956 struct seq_file *m)
957{
958 u32 int_data = btf_type_int(t);
959 u16 nr_bits = BTF_INT_BITS(int_data);
960 u16 total_bits_offset;
961 u16 nr_copy_bytes;
962 u16 nr_copy_bits;
963 u8 nr_upper_bits;
964 union {
965 u64 u64_num;
966 u8 u8_nums[8];
967 } print_num;
968
969 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
970 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
971 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
972 nr_copy_bits = nr_bits + bits_offset;
973 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
974
975 print_num.u64_num = 0;
976 memcpy(&print_num.u64_num, data, nr_copy_bytes);
977
978 /* Ditch the higher order bits */
979 nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
980 if (nr_upper_bits) {
981 /* We need to mask out some bits of the upper byte. */
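		/* e.g. nr_copy_bits == 13 => nr_upper_bits == 5, mask == 0x1f */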
982 u8 mask = (1 << nr_upper_bits) - 1;
983
984 print_num.u8_nums[nr_copy_bytes - 1] &= mask;
985 }
986
987 print_num.u64_num >>= bits_offset;
988
989 seq_printf(m, "0x%llx", print_num.u64_num);
990}
991
992static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
993 u32 type_id, void *data, u8 bits_offset,
994 struct seq_file *m)
995{
996 u32 int_data = btf_type_int(t);
997 u8 encoding = BTF_INT_ENCODING(int_data);
998 bool sign = encoding & BTF_INT_SIGNED;
999 u32 nr_bits = BTF_INT_BITS(int_data);
1000
1001 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1002 BITS_PER_BYTE_MASKED(nr_bits)) {
1003 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1004 return;
1005 }
1006
1007 switch (nr_bits) {
1008 case 64:
1009 if (sign)
1010 seq_printf(m, "%lld", *(s64 *)data);
1011 else
1012 seq_printf(m, "%llu", *(u64 *)data);
1013 break;
1014 case 32:
1015 if (sign)
1016 seq_printf(m, "%d", *(s32 *)data);
1017 else
1018 seq_printf(m, "%u", *(u32 *)data);
1019 break;
1020 case 16:
1021 if (sign)
1022 seq_printf(m, "%d", *(s16 *)data);
1023 else
1024 seq_printf(m, "%u", *(u16 *)data);
1025 break;
1026 case 8:
1027 if (sign)
1028 seq_printf(m, "%d", *(s8 *)data);
1029 else
1030 seq_printf(m, "%u", *(u8 *)data);
1031 break;
1032 default:
1033 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1034 }
1035}
1036
1037static const struct btf_kind_operations int_ops = {
1038 .check_meta = btf_int_check_meta,
1039	.resolve = btf_df_resolve,
1040	.check_member = btf_int_check_member,
1041	.log_details = btf_int_log,
1042	.seq_show = btf_int_seq_show,
1043};
1044
1045static int btf_modifier_check_member(struct btf_verifier_env *env,
1046 const struct btf_type *struct_type,
1047 const struct btf_member *member,
1048 const struct btf_type *member_type)
1049{
1050 const struct btf_type *resolved_type;
1051 u32 resolved_type_id = member->type;
1052 struct btf_member resolved_member;
1053 struct btf *btf = env->btf;
1054
1055 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1056 if (!resolved_type) {
1057 btf_verifier_log_member(env, struct_type, member,
1058 "Invalid member");
1059 return -EINVAL;
1060 }
1061
1062 resolved_member = *member;
1063 resolved_member.type = resolved_type_id;
1064
1065 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1066 &resolved_member,
1067 resolved_type);
1068}
1069
1070static int btf_ptr_check_member(struct btf_verifier_env *env,
1071 const struct btf_type *struct_type,
1072 const struct btf_member *member,
1073 const struct btf_type *member_type)
1074{
1075 u32 struct_size, struct_bits_off, bytes_offset;
1076
1077 struct_size = struct_type->size;
1078 struct_bits_off = member->offset;
1079 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1080
1081 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1082 btf_verifier_log_member(env, struct_type, member,
1083 "Member is not byte aligned");
1084 return -EINVAL;
1085 }
1086
1087 if (struct_size - bytes_offset < sizeof(void *)) {
1088 btf_verifier_log_member(env, struct_type, member,
1089 "Member exceeds struct_size");
1090 return -EINVAL;
1091 }
1092
1093 return 0;
1094}
1095
1096static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1097 const struct btf_type *t,
1098 u32 meta_left)
1099{
1100 if (btf_type_vlen(t)) {
1101 btf_verifier_log_type(env, t, "vlen != 0");
1102 return -EINVAL;
1103 }
1104
1105 if (BTF_TYPE_PARENT(t->type)) {
1106 btf_verifier_log_type(env, t, "Invalid type_id");
1107 return -EINVAL;
1108 }
1109
1110 btf_verifier_log_type(env, t, NULL);
1111
1112 return 0;
1113}
1114
1115static int btf_modifier_resolve(struct btf_verifier_env *env,
1116 const struct resolve_vertex *v)
1117{
1118 const struct btf_type *t = v->t;
1119 const struct btf_type *next_type;
1120 u32 next_type_id = t->type;
1121 struct btf *btf = env->btf;
1122 u32 next_type_size = 0;
1123
1124 next_type = btf_type_by_id(btf, next_type_id);
1125 if (!next_type) {
1126 btf_verifier_log_type(env, v->t, "Invalid type_id");
1127 return -EINVAL;
1128 }
1129
1130 /* "typedef void new_void", "const void"...etc */
1131 if (btf_type_is_void(next_type))
1132 goto resolved;
1133
1134 if (!env_type_is_resolve_sink(env, next_type) &&
1135 !env_type_is_resolved(env, next_type_id))
1136 return env_stack_push(env, next_type, next_type_id);
1137
1138 /* Figure out the resolved next_type_id with size.
1139 * They will be stored in the current modifier's
1140	 * resolved_ids and resolved_sizes so that they can
1141	 * save us some type following when we use them later (e.g.
1142	 * during pretty printing).
1143 */
1144 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1145 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1146 btf_verifier_log_type(env, v->t, "Invalid type_id");
1147 return -EINVAL;
1148 }
1149
1150resolved:
1151 env_stack_pop_resolved(env, next_type_id, next_type_size);
1152
1153 return 0;
1154}
1155
1156static int btf_ptr_resolve(struct btf_verifier_env *env,
1157 const struct resolve_vertex *v)
1158{
1159 const struct btf_type *next_type;
1160 const struct btf_type *t = v->t;
1161 u32 next_type_id = t->type;
1162 struct btf *btf = env->btf;
1163 u32 next_type_size = 0;
1164
1165 next_type = btf_type_by_id(btf, next_type_id);
1166 if (!next_type) {
1167 btf_verifier_log_type(env, v->t, "Invalid type_id");
1168 return -EINVAL;
1169 }
1170
1171 /* "void *" */
1172 if (btf_type_is_void(next_type))
1173 goto resolved;
1174
1175 if (!env_type_is_resolve_sink(env, next_type) &&
1176 !env_type_is_resolved(env, next_type_id))
1177 return env_stack_push(env, next_type, next_type_id);
1178
1179 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1180 * the modifier may have stopped resolving when it was resolved
1181 * to a ptr (last-resolved-ptr).
1182 *
1183 * We now need to continue from the last-resolved-ptr to
1184	 * ensure the last-resolved-ptr does not refer back to
1185	 * the current ptr (t).
1186 */
1187 if (btf_type_is_modifier(next_type)) {
1188 const struct btf_type *resolved_type;
1189 u32 resolved_type_id;
1190
1191 resolved_type_id = next_type_id;
1192 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1193
1194 if (btf_type_is_ptr(resolved_type) &&
1195 !env_type_is_resolve_sink(env, resolved_type) &&
1196 !env_type_is_resolved(env, resolved_type_id))
1197 return env_stack_push(env, resolved_type,
1198 resolved_type_id);
1199 }
1200
1201 if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
1202 !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
1203 btf_verifier_log_type(env, v->t, "Invalid type_id");
1204 return -EINVAL;
1205 }
1206
1207resolved:
1208 env_stack_pop_resolved(env, next_type_id, 0);
1209
1210 return 0;
1211}
1212
1213static void btf_modifier_seq_show(const struct btf *btf,
1214 const struct btf_type *t,
1215 u32 type_id, void *data,
1216 u8 bits_offset, struct seq_file *m)
1217{
1218 t = btf_type_id_resolve(btf, &type_id);
1219
1220 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1221}
1222
1223static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1224 u32 type_id, void *data, u8 bits_offset,
1225 struct seq_file *m)
1226{
1227 /* It is a hashed value */
1228 seq_printf(m, "%p", *(void **)data);
1229}
1230
1231static void btf_ref_type_log(struct btf_verifier_env *env,
1232 const struct btf_type *t)
1233{
1234 btf_verifier_log(env, "type_id=%u", t->type);
1235}
1236
1237static struct btf_kind_operations modifier_ops = {
1238 .check_meta = btf_ref_type_check_meta,
1239	.resolve = btf_modifier_resolve,
1240	.check_member = btf_modifier_check_member,
1241	.log_details = btf_ref_type_log,
1242	.seq_show = btf_modifier_seq_show,
1243};
1244
1245static struct btf_kind_operations ptr_ops = {
1246 .check_meta = btf_ref_type_check_meta,
1247	.resolve = btf_ptr_resolve,
1248	.check_member = btf_ptr_check_member,
1249	.log_details = btf_ref_type_log,
1250	.seq_show = btf_ptr_seq_show,
1251};
1252
1253static struct btf_kind_operations fwd_ops = {
1254 .check_meta = btf_ref_type_check_meta,
1255	.resolve = btf_df_resolve,
1256	.check_member = btf_df_check_member,
1257	.log_details = btf_ref_type_log,
1258	.seq_show = btf_df_seq_show,
1259};
1260
1261static int btf_array_check_member(struct btf_verifier_env *env,
1262 const struct btf_type *struct_type,
1263 const struct btf_member *member,
1264 const struct btf_type *member_type)
1265{
1266 u32 struct_bits_off = member->offset;
1267 u32 struct_size, bytes_offset;
1268 u32 array_type_id, array_size;
1269 struct btf *btf = env->btf;
1270
1271 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1272 btf_verifier_log_member(env, struct_type, member,
1273 "Member is not byte aligned");
1274 return -EINVAL;
1275 }
1276
1277 array_type_id = member->type;
1278 btf_type_id_size(btf, &array_type_id, &array_size);
1279 struct_size = struct_type->size;
1280 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1281 if (struct_size - bytes_offset < array_size) {
1282 btf_verifier_log_member(env, struct_type, member,
1283 "Member exceeds struct_size");
1284 return -EINVAL;
1285 }
1286
1287 return 0;
1288}
1289
1290static s32 btf_array_check_meta(struct btf_verifier_env *env,
1291 const struct btf_type *t,
1292 u32 meta_left)
1293{
1294 const struct btf_array *array = btf_type_array(t);
1295 u32 meta_needed = sizeof(*array);
1296
1297 if (meta_left < meta_needed) {
1298 btf_verifier_log_basic(env, t,
1299 "meta_left:%u meta_needed:%u",
1300 meta_left, meta_needed);
1301 return -EINVAL;
1302 }
1303
1304 if (btf_type_vlen(t)) {
1305 btf_verifier_log_type(env, t, "vlen != 0");
1306 return -EINVAL;
1307 }
1308
1309 /* We are a little forgiving on array->index_type since
1310 * the kernel is not using it.
1311 */
1312 /* Array elem cannot be in type void,
1313 * so !array->type is not allowed.
1314 */
1315 if (!array->type || BTF_TYPE_PARENT(array->type)) {
1316 btf_verifier_log_type(env, t, "Invalid type_id");
1317 return -EINVAL;
1318 }
1319
1320 btf_verifier_log_type(env, t, NULL);
1321
1322 return meta_needed;
1323}
1324
1325static int btf_array_resolve(struct btf_verifier_env *env,
1326 const struct resolve_vertex *v)
1327{
1328 const struct btf_array *array = btf_type_array(v->t);
1329 const struct btf_type *elem_type;
1330 u32 elem_type_id = array->type;
1331 struct btf *btf = env->btf;
1332 u32 elem_size;
1333
1334 elem_type = btf_type_by_id(btf, elem_type_id);
1335 if (btf_type_is_void_or_null(elem_type)) {
1336 btf_verifier_log_type(env, v->t,
1337 "Invalid elem");
1338 return -EINVAL;
1339 }
1340
1341 if (!env_type_is_resolve_sink(env, elem_type) &&
1342 !env_type_is_resolved(env, elem_type_id))
1343 return env_stack_push(env, elem_type, elem_type_id);
1344
1345 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1346 if (!elem_type) {
1347 btf_verifier_log_type(env, v->t, "Invalid elem");
1348 return -EINVAL;
1349 }
1350
1351 if (btf_type_is_int(elem_type)) {
1352 int int_type_data = btf_type_int(elem_type);
1353 u16 nr_bits = BTF_INT_BITS(int_type_data);
1354 u16 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
1355
1356		/* Put more restrictions on arrays of int: the int cannot
1357		 * be a bitfield and it must be u8/u16/u32/u64.
1358		 */
1359 if (BITS_PER_BYTE_MASKED(nr_bits) ||
1360 BTF_INT_OFFSET(int_type_data) ||
1361 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
1362 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
1363 btf_verifier_log_type(env, v->t,
1364 "Invalid array of int");
1365 return -EINVAL;
1366 }
1367 }
1368
1369 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1370 btf_verifier_log_type(env, v->t,
1371 "Array size overflows U32_MAX");
1372 return -EINVAL;
1373 }
1374
1375 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1376
1377 return 0;
1378}
1379
1380static void btf_array_log(struct btf_verifier_env *env,
1381 const struct btf_type *t)
1382{
1383 const struct btf_array *array = btf_type_array(t);
1384
1385 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1386 array->type, array->index_type, array->nelems);
1387}
1388
1389static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1390 u32 type_id, void *data, u8 bits_offset,
1391 struct seq_file *m)
1392{
1393 const struct btf_array *array = btf_type_array(t);
1394 const struct btf_kind_operations *elem_ops;
1395 const struct btf_type *elem_type;
1396 u32 i, elem_size, elem_type_id;
1397
1398 elem_type_id = array->type;
1399 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1400 elem_ops = btf_type_ops(elem_type);
1401 seq_puts(m, "[");
1402 for (i = 0; i < array->nelems; i++) {
1403 if (i)
1404 seq_puts(m, ",");
1405
1406 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1407 bits_offset, m);
1408 data += elem_size;
1409 }
1410 seq_puts(m, "]");
1411}
1412
1413static struct btf_kind_operations array_ops = {
1414 .check_meta = btf_array_check_meta,
1415	.resolve = btf_array_resolve,
1416	.check_member = btf_array_check_member,
1417	.log_details = btf_array_log,
1418	.seq_show = btf_array_seq_show,
1419};
1420
1421static int btf_struct_check_member(struct btf_verifier_env *env,
1422 const struct btf_type *struct_type,
1423 const struct btf_member *member,
1424 const struct btf_type *member_type)
1425{
1426 u32 struct_bits_off = member->offset;
1427 u32 struct_size, bytes_offset;
1428
1429 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1430 btf_verifier_log_member(env, struct_type, member,
1431 "Member is not byte aligned");
1432 return -EINVAL;
1433 }
1434
1435 struct_size = struct_type->size;
1436 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1437 if (struct_size - bytes_offset < member_type->size) {
1438 btf_verifier_log_member(env, struct_type, member,
1439 "Member exceeds struct_size");
1440 return -EINVAL;
1441 }
1442
1443 return 0;
1444}
1445
1446static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1447 const struct btf_type *t,
1448 u32 meta_left)
1449{
1450 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1451 const struct btf_member *member;
1452 struct btf *btf = env->btf;
1453 u32 struct_size = t->size;
1454 u32 meta_needed;
1455 u16 i;
1456
1457 meta_needed = btf_type_vlen(t) * sizeof(*member);
1458 if (meta_left < meta_needed) {
1459 btf_verifier_log_basic(env, t,
1460 "meta_left:%u meta_needed:%u",
1461 meta_left, meta_needed);
1462 return -EINVAL;
1463 }
1464
1465 btf_verifier_log_type(env, t, NULL);
1466
1467 for_each_member(i, t, member) {
1468		if (!btf_name_offset_valid(btf, member->name_off)) {
1469			btf_verifier_log_member(env, t, member,
1470						"Invalid member name_offset:%u",
1471						member->name_off);
1472 return -EINVAL;
1473 }
1474
1475 /* A member cannot be in type void */
1476 if (!member->type || BTF_TYPE_PARENT(member->type)) {
1477 btf_verifier_log_member(env, t, member,
1478 "Invalid type_id");
1479 return -EINVAL;
1480 }
1481
1482 if (is_union && member->offset) {
1483 btf_verifier_log_member(env, t, member,
1484 "Invalid member bits_offset");
1485 return -EINVAL;
1486 }
1487
1488 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1489 btf_verifier_log_member(env, t, member,
1490						"Member bits_offset exceeds its struct size");
1491 return -EINVAL;
1492 }
1493
1494 btf_verifier_log_member(env, t, member, NULL);
1495 }
1496
1497 return meta_needed;
1498}
1499
1500static int btf_struct_resolve(struct btf_verifier_env *env,
1501 const struct resolve_vertex *v)
1502{
1503 const struct btf_member *member;
1504	int err;
1505 u16 i;
1506
1507	/* Before continuing to resolve the next_member,
1508 * ensure the last member is indeed resolved to a
1509 * type with size info.
1510 */
1511 if (v->next_member) {
1512		const struct btf_type *last_member_type;
1513 const struct btf_member *last_member;
1514 u16 last_member_type_id;
1515
1516 last_member = btf_type_member(v->t) + v->next_member - 1;
1517 last_member_type_id = last_member->type;
1518 if (WARN_ON_ONCE(!env_type_is_resolved(env,
1519 last_member_type_id)))
1520 return -EINVAL;
1521
1522 last_member_type = btf_type_by_id(env->btf,
1523 last_member_type_id);
1524 err = btf_type_ops(last_member_type)->check_member(env, v->t,
1525 last_member,
1526 last_member_type);
1527 if (err)
1528 return err;
1529 }
1530
1531 for_each_member_from(i, v->next_member, v->t, member) {
1532 u32 member_type_id = member->type;
1533 const struct btf_type *member_type = btf_type_by_id(env->btf,
1534 member_type_id);
1535
1536 if (btf_type_is_void_or_null(member_type)) {
1537 btf_verifier_log_member(env, v->t, member,
1538 "Invalid member");
1539 return -EINVAL;
1540 }
1541
1542 if (!env_type_is_resolve_sink(env, member_type) &&
1543 !env_type_is_resolved(env, member_type_id)) {
1544 env_stack_set_next_member(env, i + 1);
1545 return env_stack_push(env, member_type, member_type_id);
1546 }
1547
1548 err = btf_type_ops(member_type)->check_member(env, v->t,
1549 member,
1550 member_type);
1551 if (err)
1552 return err;
1553 }
1554
1555 env_stack_pop_resolved(env, 0, 0);
1556
1557 return 0;
1558}
1559
1560static void btf_struct_log(struct btf_verifier_env *env,
1561 const struct btf_type *t)
1562{
1563 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1564}
1565
1566static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1567 u32 type_id, void *data, u8 bits_offset,
1568 struct seq_file *m)
1569{
1570 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1571 const struct btf_member *member;
1572 u32 i;
1573
1574 seq_puts(m, "{");
1575 for_each_member(i, t, member) {
1576 const struct btf_type *member_type = btf_type_by_id(btf,
1577 member->type);
1578 u32 member_offset = member->offset;
1579 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1580 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1581 const struct btf_kind_operations *ops;
1582
1583 if (i)
1584 seq_puts(m, seq);
1585
1586 ops = btf_type_ops(member_type);
1587 ops->seq_show(btf, member_type, member->type,
1588 data + bytes_offset, bits8_offset, m);
1589 }
1590 seq_puts(m, "}");
1591}
1592
1593static struct btf_kind_operations struct_ops = {
1594 .check_meta = btf_struct_check_meta,
1595	.resolve = btf_struct_resolve,
1596	.check_member = btf_struct_check_member,
1597	.log_details = btf_struct_log,
1598	.seq_show = btf_struct_seq_show,
1599};
1600
1601static int btf_enum_check_member(struct btf_verifier_env *env,
1602 const struct btf_type *struct_type,
1603 const struct btf_member *member,
1604 const struct btf_type *member_type)
1605{
1606 u32 struct_bits_off = member->offset;
1607 u32 struct_size, bytes_offset;
1608
1609 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1610 btf_verifier_log_member(env, struct_type, member,
1611 "Member is not byte aligned");
1612 return -EINVAL;
1613 }
1614
1615 struct_size = struct_type->size;
1616 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1617 if (struct_size - bytes_offset < sizeof(int)) {
1618 btf_verifier_log_member(env, struct_type, member,
1619 "Member exceeds struct_size");
1620 return -EINVAL;
1621 }
1622
1623 return 0;
1624}
1625
1626static s32 btf_enum_check_meta(struct btf_verifier_env *env,
1627 const struct btf_type *t,
1628 u32 meta_left)
1629{
1630 const struct btf_enum *enums = btf_type_enum(t);
1631 struct btf *btf = env->btf;
1632 u16 i, nr_enums;
1633 u32 meta_needed;
1634
1635 nr_enums = btf_type_vlen(t);
1636 meta_needed = nr_enums * sizeof(*enums);
1637
1638 if (meta_left < meta_needed) {
1639 btf_verifier_log_basic(env, t,
1640 "meta_left:%u meta_needed:%u",
1641 meta_left, meta_needed);
1642 return -EINVAL;
1643 }
1644
1645 if (t->size != sizeof(int)) {
1646 btf_verifier_log_type(env, t, "Expected size:%zu",
1647 sizeof(int));
1648 return -EINVAL;
1649 }
1650
1651 btf_verifier_log_type(env, t, NULL);
1652
1653 for (i = 0; i < nr_enums; i++) {
1654		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
1655			btf_verifier_log(env, "\tInvalid name_offset:%u",
1656					 enums[i].name_off);
1657 return -EINVAL;
1658 }
1659
1660 btf_verifier_log(env, "\t%s val=%d\n",
1661				 btf_name_by_offset(btf, enums[i].name_off),
1662 enums[i].val);
1663 }
1664
1665 return meta_needed;
1666}
1667
1668static void btf_enum_log(struct btf_verifier_env *env,
1669 const struct btf_type *t)
1670{
1671 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1672}
1673
1674static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
1675 u32 type_id, void *data, u8 bits_offset,
1676 struct seq_file *m)
1677{
1678 const struct btf_enum *enums = btf_type_enum(t);
1679 u32 i, nr_enums = btf_type_vlen(t);
1680 int v = *(int *)data;
1681
1682 for (i = 0; i < nr_enums; i++) {
1683 if (v == enums[i].val) {
1684 seq_printf(m, "%s",
1685				   btf_name_by_offset(btf, enums[i].name_off));
1686 return;
1687 }
1688 }
1689
1690 seq_printf(m, "%d", v);
1691}
1692
1693static struct btf_kind_operations enum_ops = {
1694 .check_meta = btf_enum_check_meta,
1695	.resolve = btf_df_resolve,
1696	.check_member = btf_enum_check_member,
1697	.log_details = btf_enum_log,
1698	.seq_show = btf_enum_seq_show,
1699};
1700
1701static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
1702 [BTF_KIND_INT] = &int_ops,
1703 [BTF_KIND_PTR] = &ptr_ops,
1704 [BTF_KIND_ARRAY] = &array_ops,
1705 [BTF_KIND_STRUCT] = &struct_ops,
1706 [BTF_KIND_UNION] = &struct_ops,
1707 [BTF_KIND_ENUM] = &enum_ops,
1708 [BTF_KIND_FWD] = &fwd_ops,
1709 [BTF_KIND_TYPEDEF] = &modifier_ops,
1710 [BTF_KIND_VOLATILE] = &modifier_ops,
1711 [BTF_KIND_CONST] = &modifier_ops,
1712 [BTF_KIND_RESTRICT] = &modifier_ops,
1713};
1714
1715static s32 btf_check_meta(struct btf_verifier_env *env,
1716 const struct btf_type *t,
1717 u32 meta_left)
1718{
1719 u32 saved_meta_left = meta_left;
1720 s32 var_meta_size;
1721
1722 if (meta_left < sizeof(*t)) {
1723 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
1724 env->log_type_id, meta_left, sizeof(*t));
1725 return -EINVAL;
1726 }
1727 meta_left -= sizeof(*t);
1728
1729 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
1730 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
1731 btf_verifier_log(env, "[%u] Invalid kind:%u",
1732 env->log_type_id, BTF_INFO_KIND(t->info));
1733 return -EINVAL;
1734 }
1735
1736	if (!btf_name_offset_valid(env->btf, t->name_off)) {
1737		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
1738				 env->log_type_id, t->name_off);
1739 return -EINVAL;
1740 }
1741
1742 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
1743 if (var_meta_size < 0)
1744 return var_meta_size;
1745
1746 meta_left -= var_meta_size;
1747
1748 return saved_meta_left - meta_left;
1749}
1750
1751static int btf_check_all_metas(struct btf_verifier_env *env)
1752{
1753 struct btf *btf = env->btf;
1754 struct btf_header *hdr;
1755 void *cur, *end;
1756
1757 hdr = btf->hdr;
1758 cur = btf->nohdr_data + hdr->type_off;
1759 end = btf->nohdr_data + hdr->str_off;
1760
1761 env->log_type_id = 1;
1762 while (cur < end) {
1763 struct btf_type *t = cur;
1764 s32 meta_size;
1765
1766 meta_size = btf_check_meta(env, t, end - cur);
1767 if (meta_size < 0)
1768 return meta_size;
1769
1770 btf_add_type(env, t);
1771 cur += meta_size;
1772 env->log_type_id++;
1773 }
1774
1775 return 0;
1776}
1777
1778static int btf_resolve(struct btf_verifier_env *env,
1779 const struct btf_type *t, u32 type_id)
1780{
1781 const struct resolve_vertex *v;
1782 int err = 0;
1783
1784 env->resolve_mode = RESOLVE_TBD;
1785 env_stack_push(env, t, type_id);
1786 while (!err && (v = env_stack_peak(env))) {
1787 env->log_type_id = v->type_id;
1788 err = btf_type_ops(v->t)->resolve(env, v);
1789 }
1790
1791 env->log_type_id = type_id;
1792 if (err == -E2BIG)
1793 btf_verifier_log_type(env, t,
1794 "Exceeded max resolving depth:%u",
1795 MAX_RESOLVE_DEPTH);
1796 else if (err == -EEXIST)
1797 btf_verifier_log_type(env, t, "Loop detected");
1798
1799 return err;
1800}
1801
1802static bool btf_resolve_valid(struct btf_verifier_env *env,
1803 const struct btf_type *t,
1804 u32 type_id)
1805{
1806 struct btf *btf = env->btf;
1807
1808 if (!env_type_is_resolved(env, type_id))
1809 return false;
1810
1811 if (btf_type_is_struct(t))
1812 return !btf->resolved_ids[type_id] &&
1813 !btf->resolved_sizes[type_id];
1814
1815 if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
1816 t = btf_type_id_resolve(btf, &type_id);
1817 return t && !btf_type_is_modifier(t);
1818 }
1819
1820 if (btf_type_is_array(t)) {
1821 const struct btf_array *array = btf_type_array(t);
1822 const struct btf_type *elem_type;
1823 u32 elem_type_id = array->type;
1824 u32 elem_size;
1825
1826 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1827 return elem_type && !btf_type_is_modifier(elem_type) &&
1828 (array->nelems * elem_size ==
1829 btf->resolved_sizes[type_id]);
1830 }
1831
1832 return false;
1833}
1834
1835static int btf_check_all_types(struct btf_verifier_env *env)
1836{
1837 struct btf *btf = env->btf;
1838 u32 type_id;
1839 int err;
1840
1841 err = env_resolve_init(env);
1842 if (err)
1843 return err;
1844
1845 env->phase++;
1846 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
1847 const struct btf_type *t = btf_type_by_id(btf, type_id);
1848
1849 env->log_type_id = type_id;
1850 if (btf_type_needs_resolve(t) &&
1851 !env_type_is_resolved(env, type_id)) {
1852 err = btf_resolve(env, t, type_id);
1853 if (err)
1854 return err;
1855 }
1856
1857 if (btf_type_needs_resolve(t) &&
1858 !btf_resolve_valid(env, t, type_id)) {
1859 btf_verifier_log_type(env, t, "Invalid resolve state");
1860 return -EINVAL;
1861 }
1862 }
1863
1864 return 0;
1865}
1866
1867static int btf_parse_type_sec(struct btf_verifier_env *env)
1868{
1869 int err;
1870
1871 err = btf_check_all_metas(env);
1872 if (err)
1873 return err;
1874
1875 return btf_check_all_types(env);
1876}
1877
1878static int btf_parse_str_sec(struct btf_verifier_env *env)
1879{
1880 const struct btf_header *hdr;
1881 struct btf *btf = env->btf;
1882 const char *start, *end;
1883
1884 hdr = btf->hdr;
1885 start = btf->nohdr_data + hdr->str_off;
1886 end = start + hdr->str_len;
1887
1888 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
1889 start[0] || end[-1]) {
1890 btf_verifier_log(env, "Invalid string section");
1891 return -EINVAL;
1892 }
1893
1894 btf->strings = start;
1895
1896 return 0;
1897}
1898
1899static int btf_parse_hdr(struct btf_verifier_env *env)
1900{
1901 const struct btf_header *hdr;
1902 struct btf *btf = env->btf;
1903 u32 meta_left;
1904
1905 if (btf->data_size < sizeof(*hdr)) {
1906 btf_verifier_log(env, "btf_header not found");
1907 return -EINVAL;
1908 }
1909
1910 btf_verifier_log_hdr(env);
1911
1912 hdr = btf->hdr;
1913 if (hdr->magic != BTF_MAGIC) {
1914 btf_verifier_log(env, "Invalid magic");
1915 return -EINVAL;
1916 }
1917
1918 if (hdr->version != BTF_VERSION) {
1919 btf_verifier_log(env, "Unsupported version");
1920 return -ENOTSUPP;
1921 }
1922
1923 if (hdr->flags) {
1924 btf_verifier_log(env, "Unsupported flags");
1925 return -ENOTSUPP;
1926 }
1927
1928 meta_left = btf->data_size - sizeof(*hdr);
1929 if (!meta_left) {
1930 btf_verifier_log(env, "No data");
1931 return -EINVAL;
1932 }
1933
1934 if (meta_left < hdr->type_off || hdr->str_off <= hdr->type_off ||
1935 /* Type section must align to 4 bytes */
1936 hdr->type_off & (sizeof(u32) - 1)) {
1937 btf_verifier_log(env, "Invalid type_off");
1938 return -EINVAL;
1939 }
1940
1941 if (meta_left < hdr->str_off ||
1942 meta_left - hdr->str_off < hdr->str_len) {
1943 btf_verifier_log(env, "Invalid str_off or str_len");
1944 return -EINVAL;
1945 }
1946
1947 btf->nohdr_data = btf->hdr + 1;
1948
1949 return 0;
1950}
1951
1952static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
1953 u32 log_level, char __user *log_ubuf, u32 log_size)
1954{
1955 struct btf_verifier_env *env = NULL;
1956 struct bpf_verifier_log *log;
1957 struct btf *btf = NULL;
1958 u8 *data;
1959 int err;
1960
1961 if (btf_data_size > BTF_MAX_SIZE)
1962 return ERR_PTR(-E2BIG);
1963
1964 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
1965 if (!env)
1966 return ERR_PTR(-ENOMEM);
1967
1968 log = &env->log;
1969 if (log_level || log_ubuf || log_size) {
1970 /* user requested verbose verifier output
1971 * and supplied buffer to store the verification trace
1972 */
1973 log->level = log_level;
1974 log->ubuf = log_ubuf;
1975 log->len_total = log_size;
1976
1977 /* log attributes have to be sane */
1978 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
1979 !log->level || !log->ubuf) {
1980 err = -EINVAL;
1981 goto errout;
1982 }
1983 }
1984
1985 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
1986 if (!btf) {
1987 err = -ENOMEM;
1988 goto errout;
1989 }
1990
1991 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
1992 if (!data) {
1993 err = -ENOMEM;
1994 goto errout;
1995 }
1996
1997 btf->data = data;
1998 btf->data_size = btf_data_size;
1999
2000 if (copy_from_user(data, btf_data, btf_data_size)) {
2001 err = -EFAULT;
2002 goto errout;
2003 }
2004
2005 env->btf = btf;
2006
2007 err = btf_parse_hdr(env);
2008 if (err)
2009 goto errout;
2010
2011 err = btf_parse_str_sec(env);
2012 if (err)
2013 goto errout;
2014
2015 err = btf_parse_type_sec(env);
2016 if (err)
2017 goto errout;
2018
2019 if (!err && log->level && bpf_verifier_log_full(log)) {
2020 err = -ENOSPC;
2021 goto errout;
2022 }
2023
2024 if (!err) {
2025 btf_verifier_env_free(env);
2026		refcount_set(&btf->refcnt, 1);
2027 return btf;
2028 }
2029
2030errout:
2031 btf_verifier_env_free(env);
2032 if (btf)
2033 btf_free(btf);
2034 return ERR_PTR(err);
2035}
2036
2037void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
2038 struct seq_file *m)
2039{
2040 const struct btf_type *t = btf_type_by_id(btf, type_id);
2041
2042 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
2043}
2044
2045static int btf_release(struct inode *inode, struct file *filp)
2046{
2047 btf_put(filp->private_data);
2048 return 0;
2049}
2050
2051const struct file_operations btf_fops = {
2052 .release = btf_release,
2053};
2054
2055static int __btf_new_fd(struct btf *btf)
2056{
2057 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
2058}
2059
2060int btf_new_fd(const union bpf_attr *attr)
2061{
2062 struct btf *btf;
2063	int ret;
2064
2065 btf = btf_parse(u64_to_user_ptr(attr->btf),
2066 attr->btf_size, attr->btf_log_level,
2067 u64_to_user_ptr(attr->btf_log_buf),
2068 attr->btf_log_size);
2069 if (IS_ERR(btf))
2070 return PTR_ERR(btf);
2071
2072 ret = btf_alloc_id(btf);
2073 if (ret) {
2074 btf_free(btf);
2075 return ret;
2076 }
2077
2078 /*
2079	 * The BTF ID is published to userspace.
2080	 * All freeing of this BTF must go through call_rcu() from
2081	 * now on (i.e. free it by calling btf_put()).
2082 */
2083
2084 ret = __btf_new_fd(btf);
2085 if (ret < 0)
2086 btf_put(btf);
2087
2088	return ret;
2089}
2090
2091struct btf *btf_get_by_fd(int fd)
2092{
2093 struct btf *btf;
2094 struct fd f;
2095
2096 f = fdget(fd);
2097
2098 if (!f.file)
2099 return ERR_PTR(-EBADF);
2100
2101 if (f.file->f_op != &btf_fops) {
2102 fdput(f);
2103 return ERR_PTR(-EINVAL);
2104 }
2105
2106 btf = f.file->private_data;
2107	refcount_inc(&btf->refcnt);
2108 fdput(f);
2109
2110 return btf;
2111}
2112
2113int btf_get_info_by_fd(const struct btf *btf,
2114 const union bpf_attr *attr,
2115 union bpf_attr __user *uattr)
2116{
2117 struct bpf_btf_info __user *uinfo;
2118 struct bpf_btf_info info = {};
2119 u32 info_copy, btf_copy;
2120 void __user *ubtf;
2121 u32 uinfo_len;
2122
2123 uinfo = u64_to_user_ptr(attr->info.info);
2124 uinfo_len = attr->info.info_len;
2125
2126 info_copy = min_t(u32, uinfo_len, sizeof(info));
2127 if (copy_from_user(&info, uinfo, info_copy))
2128 return -EFAULT;
2129
2130 info.id = btf->id;
2131 ubtf = u64_to_user_ptr(info.btf);
2132 btf_copy = min_t(u32, btf->data_size, info.btf_size);
2133 if (copy_to_user(ubtf, btf->data, btf_copy))
2134 return -EFAULT;
2135 info.btf_size = btf->data_size;
2136
2137 if (copy_to_user(uinfo, &info, info_copy) ||
2138 put_user(info_copy, &uattr->info.info_len))
2139 return -EFAULT;
2140
2141 return 0;
2142}
2143
2144int btf_get_fd_by_id(u32 id)
2145{
2146 struct btf *btf;
2147 int fd;
2148
2149 rcu_read_lock();
2150 btf = idr_find(&btf_idr, id);
2151 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
2152 btf = ERR_PTR(-ENOENT);
2153 rcu_read_unlock();
2154
2155 if (IS_ERR(btf))
2156 return PTR_ERR(btf);
2157
2158 fd = __btf_new_fd(btf);
2159 if (fd < 0)
2160 btf_put(btf);
2161
2162 return fd;
2163}
2164
2165u32 btf_id(const struct btf *btf)
2166{
2167 return btf->id;
2168}