1 // SPDX-License-Identifier: LGPL-2.1
4 * Common eBPF ELF object loading operations.
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation;
14 * version 2.1 of the License (not later!)
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this program; if not, see <http://www.gnu.org/licenses>
35 #include <asm/unistd.h>
36 #include <linux/err.h>
37 #include <linux/kernel.h>
38 #include <linux/bpf.h>
39 #include <linux/list.h>
40 #include <linux/limits.h>
42 #include <sys/types.h>
56 #define BPF_FS_MAGIC 0xcafe4a11
59 #define __printf(a, b) __attribute__((format(printf, a, b)))
/* Default print callback for all libbpf log levels: forward the
 * formatted message to stderr.  Returns vfprintf()'s result (number of
 * characters written, or negative on output error).
 *
 * NOTE(review): the elided original lacked a visible va_end(); every
 * va_start() must be paired with va_end() before return (C standard),
 * restored here.
 */
static int __base_pr(const char *format, ...)
{
	va_list args;
	int err;

	va_start(args, format);
	err = vfprintf(stderr, format, args);
	va_end(args);
	return err;
}
/* Current print callbacks; warning and info default to stderr output,
 * debug defaults to NULL (silenced) until libbpf_set_print() installs one.
 */
73 static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
74 static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
75 static __printf(1, 2) libbpf_print_fn_t __pr_debug;
/* __pr(): invoke the given callback (if set) with a "libbpf: " prefix
 * prepended to the format string.  Guard/do-while lines elided in this view.
 */
77 #define __pr(func, fmt, ...)	\
80 (func)("libbpf: " fmt, ##__VA_ARGS__); \
83 #define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
84 #define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
85 #define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
/* Public API: install caller-supplied print callbacks for the three log
 * levels.  Body elided in this view; presumably assigns __pr_warning,
 * __pr_info and __pr_debug declared above — TODO confirm against full source.
 * A NULL callback silences that level.
 */
87 void libbpf_set_print(libbpf_print_fn_t warn,
88 		      libbpf_print_fn_t info,
89 		      libbpf_print_fn_t debug)
96 #define STRERR_BUFSIZE          128
/* Translate libbpf-private error values (enum libbpf_errno, which start at
 * __LIBBPF_ERRNO__START) into 0-based indices for the message table below;
 * NR_ERRNO is the table size.
 */
98 #define ERRNO_OFFSET(e)		((e) - __LIBBPF_ERRNO__START)
99 #define ERRCODE_OFFSET(c)	ERRNO_OFFSET(LIBBPF_ERRNO__##c)
100 #define NR_ERRNO	(__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
/* Human-readable messages for libbpf-private error codes; entries are
 * indexed by ERRCODE_OFFSET() so they line up with enum libbpf_errno.
 */
102 static const char *libbpf_strerror_table[NR_ERRNO] = {
103 	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
104 	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
105 	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
106 	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
107 	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
108 	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
109 	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
110 	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
111 	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
112 	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
113 	[ERRCODE_OFFSET(WRNGPID)]	= "Wrong pid in netlink message",
114 	[ERRCODE_OFFSET(INVSEQ)]	= "Invalid netlink sequence",
/* Public API: render error code |err| (positive or negative; plain errno
 * or libbpf-private code) as a message into buf.  Always NUL-terminates
 * buf on every path.
 */
117 int libbpf_strerror(int err, char *buf, size_t size)
/* normalize to a positive code */
122 	err = err > 0 ? err : -err;
/* below libbpf's private range: defer to libc's strerror_r() */
124 	if (err < __LIBBPF_ERRNO__START) {
127 		ret = strerror_r(err, buf, size);
128 		buf[size - 1] = '\0';
/* inside libbpf's private range: look up the message table */
132 	if (err < __LIBBPF_ERRNO__END) {
135 		msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
136 		snprintf(buf, size, "%s", msg);
137 		buf[size - 1] = '\0';
/* anything past __LIBBPF_ERRNO__END is unrecognized */
141 	snprintf(buf, size, "Unknown libbpf error %d", err);
142 	buf[size - 1] = '\0';
146 #define CHECK_ERR(action, err, out) do { \
153 /* Copied from tools/perf/util/util.h */
/* free *ptr and NULL the pointer, defending against use-after-free */
155 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
/* close fd (if valid) and reset it to -1; guard lines elided in this view */
159 # define zclose(fd) ({			\
162 		___err = close((fd));	\
/* prefer mmap-based ELF reading when libelf supports it */
167 #ifdef HAVE_LIBELF_MMAP_SUPPORT
168 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
170 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
174 * bpf_prog should be a better name but it has been used in
178 /* Index in elf obj file, for relocation use. */
183 struct bpf_insn *insns;
184 size_t insns_cnt, main_prog_cnt;
185 enum bpf_prog_type type;
204 bpf_program_prep_t preprocessor;
206 struct bpf_object *obj;
208 bpf_program_clear_priv_t clear_priv;
210 enum bpf_attach_type expected_attach_type;
218 struct bpf_map_def def;
219 uint32_t btf_key_type_id;
220 uint32_t btf_value_type_id;
222 bpf_map_clear_priv_t clear_priv;
225 static LIST_HEAD(bpf_objects_list);
231 struct bpf_program *programs;
233 struct bpf_map *maps;
239 * Information when doing elf related work. Only valid if fd
259 * All loaded bpf_object is linked in a list, which is
260 * hidden to caller. bpf_objects__<func> handlers deal with
263 struct list_head list;
268 bpf_object_clear_priv_t clear_priv;
/* true while the object's ELF handle is still open (collection phase) */
272 #define obj_elf_valid(o)	((o)->efile.elf)
/* Close every loaded instance fd of |prog| and reset its instances state
 * to the "never loaded" sentinel (nr == -1, fds == NULL).
 */
274 static void bpf_program__unload(struct bpf_program *prog)
282 	 * If the object is opened but the program was never loaded,
283 	 * it is possible that prog->instances.nr == -1.
285 	if (prog->instances.nr > 0) {
286 		for (i = 0; i < prog->instances.nr; i++)
287 			zclose(prog->instances.fds[i]);
/* nr == 0 (or other non -1 values) should never happen here */
288 	} else if (prog->instances.nr != -1) {
289 		pr_warning("Internal error: instances.nr is %d\n",
293 	prog->instances.nr = -1;
294 	zfree(&prog->instances.fds);
/* Release all per-program resources: run the user's clear_priv callback,
 * unload instance fds, then free owned strings/buffers.
 */
297 static void bpf_program__exit(struct bpf_program *prog)
302 	if (prog->clear_priv)
303 		prog->clear_priv(prog, prog->priv);
306 	prog->clear_priv = NULL;
308 	bpf_program__unload(prog);
310 	zfree(&prog->section_name);
312 	zfree(&prog->reloc_desc);
/* Initialize |prog| from raw section bytes: duplicate the section name,
 * copy the instructions, and mark it as never-loaded.  On any failure,
 * bpf_program__exit() releases partial allocations and an error is
 * returned (return lines elided in this view).
 */
320 bpf_program__init(void *data, size_t size, char *section_name, int idx,
321 		  struct bpf_program *prog)
/* a section smaller than one instruction cannot be a valid program */
323 	if (size < sizeof(struct bpf_insn)) {
324 		pr_warning("corrupted section '%s'\n", section_name);
328 	bzero(prog, sizeof(*prog));
330 	prog->section_name = strdup(section_name);
331 	if (!prog->section_name) {
332 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
337 	prog->insns = malloc(size);
339 		pr_warning("failed to alloc insns for prog under section %s\n",
343 	prog->insns_cnt = size / sizeof(struct bpf_insn);
344 	memcpy(prog->insns, data,
345 	       prog->insns_cnt * sizeof(struct bpf_insn));
/* -1 means "not loaded yet"; see bpf_program__unload() */
347 	prog->instances.fds = NULL;
348 	prog->instances.nr = -1;
/* default type; callers may override via section-name conventions */
349 	prog->type = BPF_PROG_TYPE_KPROBE;
353 	bpf_program__exit(prog);
/* Build a bpf_program from section bytes and append it (by value) to
 * obj->programs, growing the array by one.
 */
358 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
359 			char *section_name, int idx)
361 	struct bpf_program prog, *progs;
364 	err = bpf_program__init(data, size, section_name, idx, &prog);
368 	progs = obj->programs;
369 	nr_progs = obj->nr_programs;
371 	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
/* realloc failure path: old array untouched, so only the new prog's
 * resources need releasing */
374 	 * In this case the original obj->programs
375 	 * is still valid, so don't need special treat for
376 	 * bpf_close_object().
378 		pr_warning("failed to alloc a new program under section '%s'\n",
380 		bpf_program__exit(&prog);
384 	pr_debug("found program %s\n", prog.section_name);
385 	obj->programs = progs;
386 	obj->nr_programs = nr_progs + 1;
/* struct copy; prog's owned pointers transfer to the array slot */
388 	progs[nr_progs] = prog;
/* For each program, find the GLOBAL symbol defined in its section and
 * duplicate that symbol's name as the program name.  The .text section
 * is treated specially (branch body elided in this view).
 */
393 bpf_object__init_prog_names(struct bpf_object *obj)
395 	Elf_Data *symbols = obj->efile.symbols;
396 	struct bpf_program *prog;
399 	for (pi = 0; pi < obj->nr_programs; pi++) {
400 		const char *name = NULL;
402 		prog = &obj->programs[pi];
403 		if (prog->idx == obj->efile.text_shndx) {
/* scan symtab until a matching global symbol is found */
408 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
412 			if (!gelf_getsym(symbols, si, &sym))
414 			if (sym.st_shndx != prog->idx)
416 			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
419 			name = elf_strptr(obj->efile.elf,
420 					  obj->efile.strtabidx,
423 				pr_warning("failed to get sym name string for prog %s\n",
425 				return -LIBBPF_ERRNO__LIBELF;
430 			pr_warning("failed to find sym for prog %s\n",
435 		prog->name = strdup(name);
437 			pr_warning("failed to allocate memory for prog sym %s\n",
/* Allocate and minimally initialize a bpf_object.  The path string is
 * stored in the same allocation (flexible trailing storage), and the new
 * object is linked onto the global bpf_objects_list.
 */
446 static struct bpf_object *bpf_object__new(const char *path,
450 	struct bpf_object *obj;
/* one allocation for struct + path copy */
452 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
454 		pr_warning("alloc memory failed for %s\n", path);
455 		return ERR_PTR(-ENOMEM);
458 	strcpy(obj->path, path);
462 	 * Caller of this function should also calls
463 	 * bpf_object__elf_finish() after data collection to return
464 	 * obj_buf to user. If not, we should duplicate the buffer to
465 	 * avoid user freeing them before elf finish.
467 	obj->efile.obj_buf = obj_buf;
468 	obj->efile.obj_buf_sz = obj_buf_sz;
/* -1 = no "maps" section seen yet */
469 	obj->efile.maps_shndx = -1;
473 	INIT_LIST_HEAD(&obj->list);
474 	list_add(&obj->list, &bpf_objects_list);
/* Tear down all ELF-parsing state: end the libelf handle, drop borrowed
 * section data pointers, free collected relocation records, close the
 * file descriptor, and release the caller's buffer reference.
 */
478 static void bpf_object__elf_finish(struct bpf_object *obj)
480 	if (!obj_elf_valid(obj))
483 	if (obj->efile.elf) {
484 		elf_end(obj->efile.elf);
485 		obj->efile.elf = NULL;
/* symbols points into libelf-owned data; just forget it */
487 	obj->efile.symbols = NULL;
489 	zfree(&obj->efile.reloc);
490 	obj->efile.nr_reloc = 0;
491 	zclose(obj->efile.fd);
/* obj_buf is owned by the caller of bpf_object__open_buffer() */
492 	obj->efile.obj_buf = NULL;
493 	obj->efile.obj_buf_sz = 0;
/* Open the object's ELF handle — from the in-memory buffer when one was
 * supplied, otherwise from obj->path — then validate the ELF header:
 * must be relocatable (ET_REL) and machine EM_BPF (or EM_NONE from old
 * LLVM).  On error, all partial state is torn down via
 * bpf_object__elf_finish().
 */
496 static int bpf_object__elf_init(struct bpf_object *obj)
501 	if (obj_elf_valid(obj)) {
502 		pr_warning("elf init: internal error\n");
503 		return -LIBBPF_ERRNO__LIBELF;
506 	if (obj->efile.obj_buf_sz > 0) {
508 		 * obj_buf should have been validated by
509 		 * bpf_object__open_buffer().
511 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
512 					    obj->efile.obj_buf_sz);
514 		obj->efile.fd = open(obj->path, O_RDONLY);
515 		if (obj->efile.fd < 0) {
516 			pr_warning("failed to open %s: %s\n", obj->path,
521 		obj->efile.elf = elf_begin(obj->efile.fd,
522 					   LIBBPF_ELF_C_READ_MMAP,
526 	if (!obj->efile.elf) {
527 		pr_warning("failed to open %s as ELF file\n",
529 		err = -LIBBPF_ERRNO__LIBELF;
533 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
534 		pr_warning("failed to get EHDR from %s\n",
536 		err = -LIBBPF_ERRNO__FORMAT;
539 	ep = &obj->efile.ehdr;
541 	/* Old LLVM set e_machine to EM_NONE */
542 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
543 		pr_warning("%s is not an eBPF object file\n",
545 		err = -LIBBPF_ERRNO__FORMAT;
/* error path: release the handle/fd acquired above */
551 	bpf_object__elf_finish(obj);
/* Verify the object file's byte order matches the host's.  Host
 * endianness is probed at runtime by inspecting the first byte of an
 * unsigned int initialized to 1 (1 => little endian host).
 */
556 bpf_object__check_endianness(struct bpf_object *obj)
558 	static unsigned int const endian = 1;
560 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
562 		/* We are big endian, BPF obj is little endian. */
563 		if (*(unsigned char const *)&endian != 1)
568 		/* We are little endian, BPF obj is big endian. */
569 		if (*(unsigned char const *)&endian != 0)
/* unknown EI_DATA value */
573 		return -LIBBPF_ERRNO__ENDIAN;
579 	pr_warning("Error: endianness mismatch.\n");
580 	return -LIBBPF_ERRNO__ENDIAN;
/* Record the "license" section contents.  Copies at most
 * sizeof(obj->license) - 1 bytes so a trailing NUL survives — assumes
 * obj->license starts zeroed (obj is calloc'ed in bpf_object__new()).
 */
584 bpf_object__init_license(struct bpf_object *obj,
585 			 void *data, size_t size)
587 	memcpy(obj->license, data,
588 	       min(size, sizeof(obj->license) - 1));
589 	pr_debug("license of %s is %s\n", obj->path, obj->license);
/* Record the "version" section: must be exactly one u32 holding the
 * kernel version code the program was built against.
 */
594 bpf_object__init_kversion(struct bpf_object *obj,
595 			  void *data, size_t size)
599 	if (size != sizeof(kver)) {
600 		pr_warning("invalid kver section in %s\n", obj->path);
601 		return -LIBBPF_ERRNO__FORMAT;
603 	memcpy(&kver, data, sizeof(kver));
604 	obj->kern_version = kver;
605 	pr_debug("kernel version of %s is %x\n", obj->path,
610 static int compare_bpf_map(const void *_a, const void *_b)
612 const struct bpf_map *a = _a;
613 const struct bpf_map *b = _b;
615 return a->offset - b->offset;
/* Parse the "maps" ELF section into obj->maps: count map symbols, infer
 * a uniform per-map definition size, allocate the array, copy each
 * struct bpf_map_def (truncating larger-than-known defs only when the
 * excess bytes are zero), then sort maps by section offset.
 */
619 bpf_object__init_maps(struct bpf_object *obj)
621 	int i, map_idx, map_def_sz, nr_maps = 0;
624 	Elf_Data *symbols = obj->efile.symbols;
/* no "maps" section: nothing to do */
626 	if (obj->efile.maps_shndx < 0)
631 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
633 		data = elf_getdata(scn, NULL);
635 		pr_warning("failed to get Elf_Data from map section %d\n",
636 			   obj->efile.maps_shndx);
641 	 * Count number of maps. Each map has a name.
642 	 * Array of maps is not supported: only the first element is
645 	 * TODO: Detect array of map and report error.
647 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
650 		if (!gelf_getsym(symbols, i, &sym))
652 		if (sym.st_shndx != obj->efile.maps_shndx)
657 	/* Alloc obj->maps and fill nr_maps. */
658 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
659 		 nr_maps, data->d_size);
664 	/* Assume equally sized map definitions */
665 	map_def_sz = data->d_size / nr_maps;
666 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
667 		pr_warning("unable to determine map definition size "
668 			   "section %s, %d maps in %zd bytes\n",
669 			   obj->path, nr_maps, data->d_size);
673 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
675 		pr_warning("alloc maps for object failed\n");
678 	obj->nr_maps = nr_maps;
681 	 * fill all fd with -1 so won't close incorrect
682 	 * fd (fd=0 is stdin) when failure (zclose won't close
685 	for (i = 0; i < nr_maps; i++)
686 		obj->maps[i].fd = -1;
689 	 * Fill obj->maps using data in "maps" section.
691 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
693 		const char *map_name;
694 		struct bpf_map_def *def;
696 		if (!gelf_getsym(symbols, i, &sym))
698 		if (sym.st_shndx != obj->efile.maps_shndx)
701 		map_name = elf_strptr(obj->efile.elf,
702 				      obj->efile.strtabidx,
704 		obj->maps[map_idx].offset = sym.st_value;
/* bounds-check the definition against the section size */
705 		if (sym.st_value + map_def_sz > data->d_size) {
706 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
707 				   obj->path, map_name);
711 		obj->maps[map_idx].name = strdup(map_name);
712 		if (!obj->maps[map_idx].name) {
713 			pr_warning("failed to alloc map name\n");
716 		pr_debug("map %d is \"%s\"\n", map_idx,
717 			 obj->maps[map_idx].name);
718 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
720 		 * If the definition of the map in the object file fits in
721 		 * bpf_map_def, copy it. Any extra fields in our version
722 		 * of bpf_map_def will default to zero as a result of the
725 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
726 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
729 			 * Here the map structure being read is bigger than what
730 			 * we expect, truncate if the excess bits are all zero.
731 			 * If they are not zero, reject this map as
735 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
736 			     b < ((char *)def) + map_def_sz; b++) {
738 					pr_warning("maps section in %s: \"%s\" "
739 						   "has unrecognized, non-zero "
741 						   obj->path, map_name);
745 			memcpy(&obj->maps[map_idx].def, def,
746 			       sizeof(struct bpf_map_def));
/* keep maps ordered by section offset for later relocation lookup */
751 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
/* Return true if ELF section |idx| carries executable instructions
 * (SHF_EXECINSTR flag); false on any libelf lookup failure.
 */
755 static bool section_have_execinstr(struct bpf_object *obj, int idx)
760 	scn = elf_getscn(obj->efile.elf, idx);
764 	if (gelf_getshdr(scn, &sh) != &sh)
767 	if (sh.sh_flags & SHF_EXECINSTR)
/* Walk every section of the opened ELF object and dispatch by name/type:
 * "license"/"version" are parsed immediately; "maps" and BTF sections are
 * remembered; SYMTAB is captured; executable PROGBITS sections become
 * bpf_programs; SHT_REL sections (against executable sections only) are
 * queued for later relocation processing.  Finishes by initializing maps
 * and program names.
 *
 * FIX: the strtab-validation failure below used to return the positive
 * value LIBBPF_ERRNO__FORMAT, unlike every other error path in this
 * function which returns negated codes — callers testing err < 0 would
 * treat the corrupted file as success.  Now returns -LIBBPF_ERRNO__FORMAT.
 */
773 static int bpf_object__elf_collect(struct bpf_object *obj)
775 	Elf *elf = obj->efile.elf;
776 	GElf_Ehdr *ep = &obj->efile.ehdr;
778 	int idx = 0, err = 0;
780 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
781 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
782 		pr_warning("failed to get e_shstrndx from %s\n",
784 		return -LIBBPF_ERRNO__FORMAT;
787 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
793 		if (gelf_getshdr(scn, &sh) != &sh) {
794 			pr_warning("failed to get section(%d) header from %s\n",
796 			err = -LIBBPF_ERRNO__FORMAT;
800 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
802 			pr_warning("failed to get section(%d) name from %s\n",
804 			err = -LIBBPF_ERRNO__FORMAT;
808 		data = elf_getdata(scn, 0);
810 			pr_warning("failed to get section(%d) data from %s(%s)\n",
811 				   idx, name, obj->path);
812 			err = -LIBBPF_ERRNO__FORMAT;
815 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
816 			 idx, name, (unsigned long)data->d_size,
817 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
820 		if (strcmp(name, "license") == 0)
821 			err = bpf_object__init_license(obj,
824 		else if (strcmp(name, "version") == 0)
825 			err = bpf_object__init_kversion(obj,
828 		else if (strcmp(name, "maps") == 0)
829 			obj->efile.maps_shndx = idx;
830 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
831 			obj->btf = btf__new(data->d_buf, data->d_size,
/* BTF parse failure is non-fatal: warn and continue without BTF */
833 			if (IS_ERR(obj->btf)) {
834 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
835 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
838 		} else if (sh.sh_type == SHT_SYMTAB) {
839 			if (obj->efile.symbols) {
840 				pr_warning("bpf: multiple SYMTAB in %s\n",
842 				err = -LIBBPF_ERRNO__FORMAT;
844 				obj->efile.symbols = data;
845 				obj->efile.strtabidx = sh.sh_link;
847 		} else if ((sh.sh_type == SHT_PROGBITS) &&
848 			   (sh.sh_flags & SHF_EXECINSTR) &&
849 			   (data->d_size > 0)) {
850 			if (strcmp(name, ".text") == 0)
851 				obj->efile.text_shndx = idx;
852 			err = bpf_object__add_program(obj, data->d_buf,
853 						      data->d_size, name, idx);
855 				char errmsg[STRERR_BUFSIZE];
857 				strerror_r(-err, errmsg, sizeof(errmsg));
858 				pr_warning("failed to alloc program %s (%s): %s",
859 					   name, obj->path, errmsg);
861 		} else if (sh.sh_type == SHT_REL) {
862 			void *reloc = obj->efile.reloc;
863 			int nr_reloc = obj->efile.nr_reloc + 1;
864 			int sec = sh.sh_info; /* points to other section */
866 			/* Only do relo for section with exec instructions */
867 			if (!section_have_execinstr(obj, sec)) {
868 				pr_debug("skip relo %s(%d) for section(%d)\n",
873 			reloc = realloc(reloc,
874 					sizeof(*obj->efile.reloc) * nr_reloc);
876 				pr_warning("realloc failed\n");
879 				int n = nr_reloc - 1;
881 				obj->efile.reloc = reloc;
882 				obj->efile.nr_reloc = nr_reloc;
884 				obj->efile.reloc[n].shdr = sh;
885 				obj->efile.reloc[n].data = data;
888 			pr_debug("skip section(%d) %s\n", idx, name);
/* strtabidx must reference a real section below the last index seen */
894 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
895 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
896 		return -LIBBPF_ERRNO__FORMAT;
898 	if (obj->efile.maps_shndx >= 0) {
899 		err = bpf_object__init_maps(obj);
903 		err = bpf_object__init_prog_names(obj);
/* Linear search for the program whose originating ELF section index is
 * |idx|; returns NULL when none matches (return line elided in this view).
 */
908 static struct bpf_program *
909 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
911 	struct bpf_program *prog;
914 	for (i = 0; i < obj->nr_programs; i++) {
915 		prog = &obj->programs[i];
916 		if (prog->idx == idx)
/* Decode one SHT_REL section for |prog| into prog->reloc_desc[]: each
 * relocation becomes either RELO_CALL (bpf-to-bpf call into .text) or
 * RELO_LD64 (64-bit map-fd load), resolved against map section offsets.
 */
923 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
924 			   Elf_Data *data, struct bpf_object *obj)
926 	Elf_Data *symbols = obj->efile.symbols;
927 	int text_shndx = obj->efile.text_shndx;
928 	int maps_shndx = obj->efile.maps_shndx;
929 	struct bpf_map *maps = obj->maps;
930 	size_t nr_maps = obj->nr_maps;
933 	pr_debug("collecting relocating info for: '%s'\n",
935 	nrels = shdr->sh_size / shdr->sh_entsize;
937 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
938 	if (!prog->reloc_desc) {
939 		pr_warning("failed to alloc memory in relocation\n");
942 	prog->nr_reloc = nrels;
944 	for (i = 0; i < nrels; i++) {
947 		unsigned int insn_idx;
948 		struct bpf_insn *insns = prog->insns;
951 		if (!gelf_getrel(data, i, &rel)) {
952 			pr_warning("relocation: failed to get %d reloc\n", i);
953 			return -LIBBPF_ERRNO__FORMAT;
956 		if (!gelf_getsym(symbols,
957 				 GELF_R_SYM(rel.r_info),
959 			pr_warning("relocation: symbol %"PRIx64" not found\n",
960 				   GELF_R_SYM(rel.r_info));
961 			return -LIBBPF_ERRNO__FORMAT;
963 		pr_debug("relo for %lld value %lld name %d\n",
964 			 (long long) (rel.r_info >> 32),
965 			 (long long) sym.st_value, sym.st_name);
/* only relocations against the maps section or .text are meaningful */
967 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
968 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
969 				   prog->section_name, sym.st_shndx);
970 			return -LIBBPF_ERRNO__RELOC;
/* byte offset -> instruction index */
973 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
974 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
/* call instruction: must be a bpf-to-bpf pseudo call */
976 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
977 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
978 				pr_warning("incorrect bpf_call opcode\n");
979 				return -LIBBPF_ERRNO__RELOC;
981 			prog->reloc_desc[i].type = RELO_CALL;
982 			prog->reloc_desc[i].insn_idx = insn_idx;
983 			prog->reloc_desc[i].text_off = sym.st_value;
/* otherwise only ld_imm64 (map fd load) may carry a relocation */
987 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
988 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
989 				   insn_idx, insns[insn_idx].code);
990 			return -LIBBPF_ERRNO__RELOC;
993 		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
994 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
995 			if (maps[map_idx].offset == sym.st_value) {
996 				pr_debug("relocation: find map %zd (%s) for insn %u\n",
997 					 map_idx, maps[map_idx].name, insn_idx);
1002 		if (map_idx >= nr_maps) {
1003 			pr_warning("bpf relocation: map_idx %d large than %d\n",
1004 				   (int)map_idx, (int)nr_maps - 1);
1005 			return -LIBBPF_ERRNO__RELOC;
1008 		prog->reloc_desc[i].type = RELO_LD64;
1009 		prog->reloc_desc[i].insn_idx = insn_idx;
1010 		prog->reloc_desc[i].map_idx = map_idx;
/* Look up BTF type IDs for |map|'s key and value by the naming
 * convention "<map>_key" / "<map>_value", verify their BTF-resolved
 * sizes match the map definition, and store the IDs on the map.
 * Failure here is non-fatal to map creation (caller retries without BTF).
 *
 * FIX: snprintf() returns the length the full string WOULD have had, so
 * a name longer than the buffer can make it return a value GREATER than
 * max_name; the previous "== max_name" check missed those overflows.
 * Changed both checks to ">= max_name".
 */
1015 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1017 	struct bpf_map_def *def = &map->def;
1018 	const size_t max_name = 256;
1019 	int64_t key_size, value_size;
1020 	int32_t key_id, value_id;
1021 	char name[max_name];
1023 	/* Find key type by name from BTF */
1024 	if (snprintf(name, max_name, "%s_key", map->name) >= max_name) {
1025 		pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
1026 			   map->name, map->name);
1030 	key_id = btf__find_by_name(btf, name);
1032 		pr_debug("map:%s key_type:%s cannot be found in BTF\n",
1037 	key_size = btf__resolve_size(btf, key_id);
1039 		pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
/* BTF-declared key size must agree with the map definition */
1044 	if (def->key_size != key_size) {
1045 		pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
1046 			   map->name, name, (unsigned int)key_size, def->key_size);
1050 	/* Find value type from BTF */
1051 	if (snprintf(name, max_name, "%s_value", map->name) >= max_name) {
1052 		pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
1053 			   map->name, map->name);
1057 	value_id = btf__find_by_name(btf, name);
1059 		pr_debug("map:%s value_type:%s cannot be found in BTF\n",
1064 	value_size = btf__resolve_size(btf, value_id);
1065 	if (value_size < 0) {
1066 		pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
1071 	if (def->value_size != value_size) {
1072 		pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
1073 			   map->name, name, (unsigned int)value_size, def->value_size);
1077 	map->btf_key_type_id = key_id;
1078 	map->btf_value_type_id = value_id;
/* Create a kernel map for every obj->maps[i] via bpf_create_map_xattr(),
 * attaching BTF key/value type info when available.  If creation with
 * BTF fails, retries once without BTF.  On hard failure, closes all fds
 * created so far.
 */
1084 bpf_object__create_maps(struct bpf_object *obj)
1086 	struct bpf_create_map_attr create_attr = {};
1090 	for (i = 0; i < obj->nr_maps; i++) {
1091 		struct bpf_map *map = &obj->maps[i];
1092 		struct bpf_map_def *def = &map->def;
1093 		int *pfd = &map->fd;
1095 		create_attr.name = map->name;
1096 		create_attr.map_ifindex = map->map_ifindex;
1097 		create_attr.map_type = def->type;
1098 		create_attr.map_flags = def->map_flags;
1099 		create_attr.key_size = def->key_size;
1100 		create_attr.value_size = def->value_size;
1101 		create_attr.max_entries = def->max_entries;
/* reset BTF fields: create_attr is reused across loop iterations */
1102 		create_attr.btf_fd = 0;
1103 		create_attr.btf_key_type_id = 0;
1104 		create_attr.btf_value_type_id = 0;
1106 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1107 			create_attr.btf_fd = btf__fd(obj->btf);
1108 			create_attr.btf_key_type_id = map->btf_key_type_id;
1109 			create_attr.btf_value_type_id = map->btf_value_type_id;
1112 		*pfd = bpf_create_map_xattr(&create_attr);
/* kernel may reject BTF-annotated creation: retry plain */
1113 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1114 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1115 				   map->name, strerror(errno), errno);
1116 			create_attr.btf_fd = 0;
1117 			create_attr.btf_key_type_id = 0;
1118 			create_attr.btf_value_type_id = 0;
1119 			map->btf_key_type_id = 0;
1120 			map->btf_value_type_id = 0;
1121 			*pfd = bpf_create_map_xattr(&create_attr);
1128 			pr_warning("failed to create map (name: '%s'): %s\n",
/* unwind: close every map fd created before the failure */
1131 			for (j = 0; j < i; j++)
1132 				zclose(obj->maps[j].fd);
1135 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
/* Resolve one RELO_CALL relocation: on first use, append the whole .text
 * program's instructions to |prog| (recording main_prog_cnt as the
 * splice point), then patch the call instruction's imm to the relative
 * target inside the appended .text copy.
 */
1142 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1143 			struct reloc_desc *relo)
1145 	struct bpf_insn *insn, *new_insn;
1146 	struct bpf_program *text;
1149 	if (relo->type != RELO_CALL)
1150 		return -LIBBPF_ERRNO__RELOC;
/* .text itself must not contain call relocations into .text */
1152 	if (prog->idx == obj->efile.text_shndx) {
1153 		pr_warning("relo in .text insn %d into off %d\n",
1154 			   relo->insn_idx, relo->text_off);
1155 		return -LIBBPF_ERRNO__RELOC;
/* main_prog_cnt == 0 means .text hasn't been appended yet */
1158 	if (prog->main_prog_cnt == 0) {
1159 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1161 			pr_warning("no .text section found yet relo into text exist\n");
1162 			return -LIBBPF_ERRNO__RELOC;
1164 		new_cnt = prog->insns_cnt + text->insns_cnt;
1165 		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
1167 			pr_warning("oom in prog realloc\n");
1170 		memcpy(new_insn + prog->insns_cnt, text->insns,
1171 		       text->insns_cnt * sizeof(*insn));
1172 		prog->insns = new_insn;
1173 		prog->main_prog_cnt = prog->insns_cnt;
1174 		prog->insns_cnt = new_cnt;
1175 		pr_debug("added %zd insn from %s to prog %s\n",
1176 			 text->insns_cnt, text->section_name,
1177 			 prog->section_name);
/* rewrite call target relative to the appended .text block */
1179 	insn = &prog->insns[relo->insn_idx];
1180 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
/* Apply all collected relocations to |prog|: RELO_LD64 patches map fds
 * into ld_imm64 instructions, RELO_CALL splices/patches .text calls.
 * Frees reloc_desc when done — relocation is one-shot.
 */
1185 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
/* nothing to do for programs without relocations */
1189 	if (!prog || !prog->reloc_desc)
1192 	for (i = 0; i < prog->nr_reloc; i++) {
1193 		if (prog->reloc_desc[i].type == RELO_LD64) {
1194 			struct bpf_insn *insns = prog->insns;
1195 			int insn_idx, map_idx;
1197 			insn_idx = prog->reloc_desc[i].insn_idx;
1198 			map_idx = prog->reloc_desc[i].map_idx;
1200 			if (insn_idx >= (int)prog->insns_cnt) {
1201 				pr_warning("relocation out of range: '%s'\n",
1202 					   prog->section_name);
1203 				return -LIBBPF_ERRNO__RELOC;
/* mark the ld_imm64 as a map-fd load and patch in the created fd */
1205 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1206 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1208 			err = bpf_program__reloc_text(prog, obj,
1209 						      &prog->reloc_desc[i]);
1215 	zfree(&prog->reloc_desc);
/* Relocate every program of the object; stops at first failure. */
1222 bpf_object__relocate(struct bpf_object *obj)
1224 	struct bpf_program *prog;
1228 	for (i = 0; i < obj->nr_programs; i++) {
1229 		prog = &obj->programs[i];
1231 		err = bpf_program__relocate(prog, obj);
1233 			pr_warning("failed to relocate '%s'\n",
1234 				   prog->section_name);
/* For each SHT_REL section queued by bpf_object__elf_collect(), find the
 * program it targets (via sh_info) and decode its relocation entries.
 * Requires the ELF handle to still be open.
 */
1241 static int bpf_object__collect_reloc(struct bpf_object *obj)
1245 	if (!obj_elf_valid(obj)) {
1246 		pr_warning("Internal error: elf object is closed\n");
1247 		return -LIBBPF_ERRNO__INTERNAL;
1250 	for (i = 0; i < obj->efile.nr_reloc; i++) {
1251 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1252 		Elf_Data *data = obj->efile.reloc[i].data;
/* sh_info of a REL section names the section it applies to */
1253 		int idx = shdr->sh_info;
1254 		struct bpf_program *prog;
1256 		if (shdr->sh_type != SHT_REL) {
1257 			pr_warning("internal error at %d\n", __LINE__);
1258 			return -LIBBPF_ERRNO__INTERNAL;
1261 		prog = bpf_object__find_prog_by_idx(obj, idx);
1263 			pr_warning("relocation failed: no section(%d)\n", idx);
1264 			return -LIBBPF_ERRNO__RELOC;
1267 		err = bpf_program__collect_reloc(prog,
/* Load one program instance into the kernel via bpf_load_program_xattr(),
 * storing the new fd in *pfd.  On failure, classifies the error into a
 * libbpf errno: verifier rejection (with log dump), program too big,
 * wrong program type (probed by retrying as kprobe), or kernel version
 * mismatch.
 */
1277 load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
1278 	     const char *name, struct bpf_insn *insns, int insns_cnt,
1279 	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
1281 	struct bpf_load_program_attr load_attr;
1285 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
1286 	load_attr.prog_type = type;
1287 	load_attr.expected_attach_type = expected_attach_type;
1288 	load_attr.name = name;
1289 	load_attr.insns = insns;
1290 	load_attr.insns_cnt = insns_cnt;
1291 	load_attr.license = license;
1292 	load_attr.kern_version = kern_version;
1293 	load_attr.prog_ifindex = prog_ifindex;
1295 	if (!load_attr.insns || !load_attr.insns_cnt)
/* log buffer is best-effort: load proceeds without it on OOM */
1298 	log_buf = malloc(BPF_LOG_BUF_SIZE);
1300 		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
1302 	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
1310 	ret = -LIBBPF_ERRNO__LOAD;
1311 	pr_warning("load bpf program failed: %s\n", strerror(errno));
/* non-empty verifier log => verifier rejected the program */
1313 	if (log_buf && log_buf[0] != '\0') {
1314 		ret = -LIBBPF_ERRNO__VERIFY;
1315 		pr_warning("-- BEGIN DUMP LOG ---\n");
1316 		pr_warning("\n%s\n", log_buf);
1317 		pr_warning("-- END LOG --\n");
1318 	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
1319 		pr_warning("Program too large (%zu insns), at most %d insns\n",
1320 			   load_attr.insns_cnt, BPF_MAXINSNS);
1321 		ret = -LIBBPF_ERRNO__PROG2BIG;
1323 		/* Wrong program type? */
1324 		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
/* probe: if the same insns load as a kprobe, the type was wrong */
1327 			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
1328 			load_attr.expected_attach_type = 0;
1329 			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
1332 				ret = -LIBBPF_ERRNO__PROGTYPE;
1338 		ret = -LIBBPF_ERRNO__KVER;
/* Load all instances of |prog| into the kernel.  Without a preprocessor
 * there is exactly one instance; with one, each instance's instructions
 * are produced by the preprocessor callback first (which may also skip
 * an instance by returning no insns).  On error, the program's insns are
 * freed since they are no longer needed after the load attempt.
 */
1347 bpf_program__load(struct bpf_program *prog,
1348 		  char *license, u32 kern_version)
/* lazily set up single-instance bookkeeping for plain programs */
1352 	if (prog->instances.nr < 0 || !prog->instances.fds) {
1353 		if (prog->preprocessor) {
1354 			pr_warning("Internal error: can't load program '%s'\n",
1355 				   prog->section_name);
1356 			return -LIBBPF_ERRNO__INTERNAL;
1359 		prog->instances.fds = malloc(sizeof(int));
1360 		if (!prog->instances.fds) {
1361 			pr_warning("Not enough memory for BPF fds\n");
1364 		prog->instances.nr = 1;
1365 		prog->instances.fds[0] = -1;
1368 	if (!prog->preprocessor) {
1369 		if (prog->instances.nr != 1) {
1370 			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
1371 				   prog->section_name, prog->instances.nr);
1373 		err = load_program(prog->type, prog->expected_attach_type,
1374 				   prog->name, prog->insns, prog->insns_cnt,
1375 				   license, kern_version, &fd,
1376 				   prog->prog_ifindex);
1378 			prog->instances.fds[0] = fd;
1382 	for (i = 0; i < prog->instances.nr; i++) {
1383 		struct bpf_prog_prep_result result;
1384 		bpf_program_prep_t preprocessor = prog->preprocessor;
1386 		bzero(&result, sizeof(result));
1387 		err = preprocessor(prog, i, prog->insns,
1388 				   prog->insns_cnt, &result);
1390 			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
1391 				   i, prog->section_name);
/* preprocessor may legitimately skip an instance */
1395 		if (!result.new_insn_ptr || !result.new_insn_cnt) {
1396 			pr_debug("Skip loading the %dth instance of program '%s'\n",
1397 				 i, prog->section_name);
1398 			prog->instances.fds[i] = -1;
1404 		err = load_program(prog->type, prog->expected_attach_type,
1405 				   prog->name, result.new_insn_ptr,
1406 				   result.new_insn_cnt,
1407 				   license, kern_version, &fd,
1408 				   prog->prog_ifindex);
1411 			pr_warning("Loading the %dth instance of program '%s' failed\n",
1412 				   i, prog->section_name);
1418 		prog->instances.fds[i] = fd;
1422 	pr_warning("failed to load program '%s'\n",
1423 		   prog->section_name);
/* insns are only needed for loading; drop them on failure too */
1424 	zfree(&prog->insns);
1425 	prog->insns_cnt = 0;
/* Load every program of the object except the .text section itself,
 * which only feeds bpf-to-bpf call splicing and is never loaded alone.
 */
1430 bpf_object__load_progs(struct bpf_object *obj)
1435 	for (i = 0; i < obj->nr_programs; i++) {
1436 		if (obj->programs[i].idx == obj->efile.text_shndx)
1438 		err = bpf_program__load(&obj->programs[i],
/* Whether the kernel requires a matching kern_version attribute for this
 * program type.  Networking/cgroup types do not; tracing types (kprobe
 * etc.) do.  Return lines are elided in this view; the first group
 * presumably returns false and the second true — TODO confirm.
 */
1447 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
1450 	case BPF_PROG_TYPE_SOCKET_FILTER:
1451 	case BPF_PROG_TYPE_SCHED_CLS:
1452 	case BPF_PROG_TYPE_SCHED_ACT:
1453 	case BPF_PROG_TYPE_XDP:
1454 	case BPF_PROG_TYPE_CGROUP_SKB:
1455 	case BPF_PROG_TYPE_CGROUP_SOCK:
1456 	case BPF_PROG_TYPE_LWT_IN:
1457 	case BPF_PROG_TYPE_LWT_OUT:
1458 	case BPF_PROG_TYPE_LWT_XMIT:
1459 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1460 	case BPF_PROG_TYPE_SOCK_OPS:
1461 	case BPF_PROG_TYPE_SK_SKB:
1462 	case BPF_PROG_TYPE_CGROUP_DEVICE:
1463 	case BPF_PROG_TYPE_SK_MSG:
1464 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1466 	case BPF_PROG_TYPE_UNSPEC:
1467 	case BPF_PROG_TYPE_KPROBE:
1468 	case BPF_PROG_TYPE_TRACEPOINT:
1469 	case BPF_PROG_TYPE_PERF_EVENT:
1470 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
/* Final sanity check before use: objects whose program type requires a
 * kernel version must carry a "version" section.
 */
1476 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1478 	if (needs_kver && obj->kern_version == 0) {
1479 		pr_warning("%s doesn't provide kernel version\n",
1481 		return -LIBBPF_ERRNO__KVERSION;
/* Common open path: init libelf, allocate the object, then run the full
 * ELF pipeline (init, endian check, section collection, relocation
 * collection, validation).  The ELF handle is closed afterwards in both
 * success and failure paths.  Returns ERR_PTR on error.
 */
1486 static struct bpf_object *
1487 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
1490 	struct bpf_object *obj;
1493 	if (elf_version(EV_CURRENT) == EV_NONE) {
1494 		pr_warning("failed to init libelf for %s\n", path);
1495 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1498 	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
1502 	CHECK_ERR(bpf_object__elf_init(obj), err, out);
1503 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1504 	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
1505 	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1506 	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
1508 	bpf_object__elf_finish(obj);
/* error path: free the object entirely */
1511 	bpf_object__close(obj);
1512 	return ERR_PTR(err);
/* Public API: open a BPF object file from |path| (kernel version
 * required, needs_kver = true).
 */
1515 struct bpf_object *bpf_object__open(const char *path)
1517 	/* param validation */
1521 	pr_debug("loading %s\n", path);
1523 	return __bpf_object__open(path, NULL, 0, true);
/* Public API: open a BPF object from an in-memory buffer.  A synthetic
 * name "<addr>-<size>" is generated when the caller supplies none
 * (selection logic elided in this view).
 */
1526 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1532 	/* param validation */
1533 	if (!obj_buf || obj_buf_sz <= 0)
1537 	snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1538 		 (unsigned long)obj_buf,
1539 		 (unsigned long)obj_buf_sz);
1540 	tmp_name[sizeof(tmp_name) - 1] = '\0';
1543 	pr_debug("loading object '%s' from buffer\n",
1546 	return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
/* Public API: close every map fd and unload every program instance of
 * the object, leaving the object structure itself intact.
 */
1549 int bpf_object__unload(struct bpf_object *obj)
1556 	for (i = 0; i < obj->nr_maps; i++)
1557 		zclose(obj->maps[i].fd);
1559 	for (i = 0; i < obj->nr_programs; i++)
1560 		bpf_program__unload(&obj->programs[i]);
/* Load the object into the kernel: create maps, apply relocations, then
 * load all programs.  Loading twice is rejected.  On any failure, roll
 * back with bpf_object__unload().  (The loaded-flag check/set lines are
 * elided in this excerpt.) */
1565 int bpf_object__load(struct bpf_object *obj)
1573 		pr_warning("object should not be loaded twice\n");
1579 	CHECK_ERR(bpf_object__create_maps(obj), err, out);
1580 	CHECK_ERR(bpf_object__relocate(obj), err, out);
1581 	CHECK_ERR(bpf_object__load_progs(obj), err, out);
/* Error path: release whatever was created before the failure. */
1585 	bpf_object__unload(obj);
1586 	pr_warning("failed to load object '%s'\n", obj->path);
/* Verify that a pin path lives on a BPF filesystem: statfs() the parent
 * directory (dirname mutates, hence the strdup copy) and compare the fs
 * magic against BPF_FS_MAGIC. */
1590 static int check_path(const char *path)
1592 	struct statfs st_fs;
1599 	dname = strdup(path);
1603 	dir = dirname(dname);
1604 	if (statfs(dir, &st_fs)) {
1605 		pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
/* Only reject on wrong fs type when statfs itself succeeded. */
1610 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1611 		pr_warning("specified path %s is not on BPF FS\n", path);
/* Pin one instance of a program at @path via bpf_obj_pin().  Validates the
 * path (must be on bpffs), the program pointer and the instance index
 * before touching the kernel. */
1618 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1623 	err = check_path(path);
1628 		pr_warning("invalid program pointer\n");
1632 	if (instance < 0 || instance >= prog->instances.nr) {
1633 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1634 			   instance, prog->section_name, prog->instances.nr);
1638 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1639 		pr_warning("failed to pin program: %s\n", strerror(errno));
1642 	pr_debug("pinned program '%s'\n", path);
/* mkdir() helper for pinning: create @path with mode 0700, treating an
 * already-existing directory (EEXIST) as success.  Note strerror(-err):
 * err holds a negated errno at this point (assignment elided in this
 * excerpt). */
1647 static int make_dir(const char *path)
1651 	if (mkdir(path, 0700) && errno != EEXIST)
1655 		pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
/* Pin every instance of a program: create directory @path and pin each
 * instance i as "<path>/<i>".  Fails if the program has no instances or
 * a constructed path would exceed PATH_MAX. */
1659 int bpf_program__pin(struct bpf_program *prog, const char *path)
1663 	err = check_path(path);
1668 		pr_warning("invalid program pointer\n");
1672 	if (prog->instances.nr <= 0) {
1673 		pr_warning("no instances of prog %s to pin\n",
1674 			   prog->section_name);
1678 	err = make_dir(path);
1682 	for (i = 0; i < prog->instances.nr; i++) {
/* snprintf return < 0 is an encoding error; >= PATH_MAX means truncation. */
1686 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1689 		else if (len >= PATH_MAX)
1690 			return -ENAMETOOLONG;
1692 		err = bpf_program__pin_instance(prog, buf, i);
/* Pin a map's fd at @path (must be on bpffs) via bpf_obj_pin(). */
1700 int bpf_map__pin(struct bpf_map *map, const char *path)
1704 	err = check_path(path);
1709 		pr_warning("invalid map pointer\n");
1713 	if (bpf_obj_pin(map->fd, path)) {
1714 		pr_warning("failed to pin map: %s\n", strerror(errno));
1718 	pr_debug("pinned map '%s'\n", path);
/* Pin an entire loaded object under directory @path: each map is pinned
 * as "<path>/<map name>" and each program as "<path>/<section name>"
 * (bpf_program__pin then creates per-instance entries below that).
 * The object must already be loaded. */
1722 int bpf_object__pin(struct bpf_object *obj, const char *path)
1724 	struct bpf_program *prog;
1725 	struct bpf_map *map;
1732 		pr_warning("object not yet loaded; load it first\n");
1736 	err = make_dir(path);
1740 	bpf_map__for_each(map, obj) {
1744 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1745 			       bpf_map__name(map));
1748 		else if (len >= PATH_MAX)
1749 			return -ENAMETOOLONG;
1751 		err = bpf_map__pin(map, buf);
1756 	bpf_object__for_each_program(prog, obj) {
1760 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1761 			       prog->section_name);
1764 		else if (len >= PATH_MAX)
1765 			return -ENAMETOOLONG;
1767 		err = bpf_program__pin(prog, buf);
/* Destroy an object: run the user's clear_priv callbacks (object-level and
 * per-map), release the ELF handle, unload kernel resources, free BTF,
 * free per-map and per-program state, and unlink the object from the
 * global bpf_objects_list.  (Final free of the object itself is elided in
 * this excerpt.) */
1775 void bpf_object__close(struct bpf_object *obj)
1782 	if (obj->clear_priv)
1783 		obj->clear_priv(obj, obj->priv);
1785 	bpf_object__elf_finish(obj);
1786 	bpf_object__unload(obj);
1787 	btf__free(obj->btf);
1789 	for (i = 0; i < obj->nr_maps; i++) {
1790 		zfree(&obj->maps[i].name);
1791 		if (obj->maps[i].clear_priv)
1792 			obj->maps[i].clear_priv(&obj->maps[i],
/* Reset priv/clear_priv so a stale callback can never fire again. */
1794 		obj->maps[i].priv = NULL;
1795 		obj->maps[i].clear_priv = NULL;
1800 	if (obj->programs && obj->nr_programs) {
1801 		for (i = 0; i < obj->nr_programs; i++)
1802 			bpf_program__exit(&obj->programs[i]);
1804 	zfree(&obj->programs);
1806 	list_del(&obj->list);
/* Iterator over the global bpf_objects_list: NULL @prev yields the first
 * object; reaching the list head again means the end (returns NULL there
 * — return lines elided in this excerpt). */
1811 bpf_object__next(struct bpf_object *prev)
1813 	struct bpf_object *next;
1816 		next = list_first_entry(&bpf_objects_list,
1820 		next = list_next_entry(prev, list);
1822 	/* Empty list is noticed here so don't need checking on entry. */
1823 	if (&next->list == &bpf_objects_list)
/* Object name accessor: the object's path, or ERR_PTR(-EINVAL) for NULL. */
1829 const char *bpf_object__name(struct bpf_object *obj)
1831 	return obj ? obj->path : ERR_PTR(-EINVAL);
/* Kernel version recorded in the object's ELF; 0 for NULL or when absent. */
1834 unsigned int bpf_object__kversion(struct bpf_object *obj)
1836 	return obj ? obj->kern_version : 0;
/* fd of the object's loaded BTF, or -1 when the object carries no BTF. */
1839 int bpf_object__btf_fd(const struct bpf_object *obj)
1841 	return obj->btf ? btf__fd(obj->btf) : -1;
/* Attach opaque user data to an object.  If previous private data exists
 * with a registered destructor, run it before overwriting. */
1844 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1845 			 bpf_object_clear_priv_t clear_priv)
1847 	if (obj->priv && obj->clear_priv)
1848 		obj->clear_priv(obj, obj->priv);
1851 	obj->clear_priv = clear_priv;
/* Retrieve the user data set via bpf_object__set_priv(); ERR_PTR(-EINVAL)
 * for a NULL object. */
1855 void *bpf_object__priv(struct bpf_object *obj)
1857 	return obj ? obj->priv : ERR_PTR(-EINVAL);
/* Iterator over an object's programs array.  NULL @prev yields the first
 * program; otherwise advance by array index, returning NULL past the end.
 * A @prev that belongs to a different object is rejected. */
1860 struct bpf_program *
1861 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1869 		return &obj->programs[0];
1871 	if (prev->obj != obj) {
1872 		pr_warning("error: program handler doesn't match object\n");
/* Pointer arithmetic gives prev's index; step to the next slot. */
1876 	idx = (prev - obj->programs) + 1;
1877 	if (idx >= obj->nr_programs)
1879 	return &obj->programs[idx];
/* Attach opaque user data to a program, running any previous destructor
 * first (mirrors bpf_object__set_priv). */
1882 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1883 			  bpf_program_clear_priv_t clear_priv)
1885 	if (prog->priv && prog->clear_priv)
1886 		prog->clear_priv(prog, prog->priv);
1889 	prog->clear_priv = clear_priv;
/* Retrieve the user data set via bpf_program__set_priv(); ERR_PTR(-EINVAL)
 * for a NULL program. */
1893 void *bpf_program__priv(struct bpf_program *prog)
1895 	return prog ? prog->priv : ERR_PTR(-EINVAL);
/* Return the program's title (its ELF section name).  With @needs_copy the
 * string is strdup'd and the CALLER owns the allocation; ERR_PTR(-ENOMEM)
 * on allocation failure. */
1898 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1902 	title = prog->section_name;
1904 		title = strdup(title);
1906 			pr_warning("failed to strdup program title\n");
1907 			return ERR_PTR(-ENOMEM);
/* fd of the program's first (0th) instance — convenience wrapper around
 * bpf_program__nth_fd(). */
1914 int bpf_program__fd(struct bpf_program *prog)
1916 	return bpf_program__nth_fd(prog, 0);
/* Register a pre-processor that will produce @nr_instances variants of the
 * program at load time.  Must be called before loading: once fds exist
 * the pre-processor can no longer be installed.  All instance fds start
 * at -1 ("not loaded"). */
1919 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1920 			  bpf_program_prep_t prep)
1924 	if (nr_instances <= 0 || !prep)
1927 	if (prog->instances.nr > 0 || prog->instances.fds) {
1928 		pr_warning("Can't set pre-processor after loading\n");
1932 	instances_fds = malloc(sizeof(int) * nr_instances);
1933 	if (!instances_fds) {
1934 		pr_warning("alloc memory failed for fds\n");
1938 	/* fill all fd with -1 */
/* memset with -1 sets every byte to 0xff, which is -1 for a two's-
 * complement int — valid for this specific fill value. */
1939 	memset(instances_fds, -1, sizeof(int) * nr_instances);
1941 	prog->instances.nr = nr_instances;
1942 	prog->instances.fds = instances_fds;
1943 	prog->preprocessor = prep;
/* fd of the n-th program instance.  Rejects an out-of-range index and
 * warns when the instance exists but was never loaded (fd still -1 —
 * the fd < 0 check line is elided in this excerpt). */
1947 int bpf_program__nth_fd(struct bpf_program *prog, int n)
1951 	if (n >= prog->instances.nr || n < 0) {
1952 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1953 			   n, prog->section_name, prog->instances.nr);
1957 	fd = prog->instances.fds[n];
1959 		pr_warning("%dth instance of program '%s' is invalid\n",
1960 			   n, prog->section_name);
/* Setter/predicate pair for a program's BPF program type.  (The setter's
 * body is elided in this excerpt.)  bpf_program__is_type() returns false
 * for a NULL program. */
1967 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
1972 static bool bpf_program__is_type(struct bpf_program *prog,
1973 				 enum bpf_prog_type type)
1975 	return prog ? (prog->type == type) : false;
/* Generate the public bpf_program__set_<name>() / bpf_program__is_<name>()
 * pairs for each program type below; each expands to thin wrappers around
 * bpf_program__set_type() / bpf_program__is_type(). */
1978 #define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
1979 int bpf_program__set_##NAME(struct bpf_program *prog)	\
1983 	bpf_program__set_type(prog, TYPE);		\
1987 bool bpf_program__is_##NAME(struct bpf_program *prog)	\
1989 	return bpf_program__is_type(prog, TYPE);	\
1992 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
1993 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
1994 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
1995 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
1996 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
1997 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
1998 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
1999 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
/* Record the attach type the program expects; consumed when the program
 * is loaded into the kernel. */
2001 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2002 					   enum bpf_attach_type type)
2004 	prog->expected_attach_type = type;
/* Mapping from ELF section-name prefixes to (prog_type, expected attach
 * type).  Entries store sizeof(string)-1 so matching can be done with a
 * length-bounded prefix compare.  BPF_S_PROG_SEC / BPF_SA_PROG_SEC are
 * shorthands that fix the prog type to CGROUP_SOCK / CGROUP_SOCK_ADDR and
 * vary only the attach type. */
2007 #define BPF_PROG_SEC_FULL(string, ptype, atype) \
2008 	{ string, sizeof(string) - 1, ptype, atype }
2010 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)
2012 #define BPF_S_PROG_SEC(string, ptype) \
2013 	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)
2015 #define BPF_SA_PROG_SEC(string, ptype) \
2016 	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)
2018 static const struct {
2021 	enum bpf_prog_type prog_type;
2022 	enum bpf_attach_type expected_attach_type;
2023 } section_names[] = {
2024 	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
2025 	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
2026 	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
2027 	BPF_PROG_SEC("classifier",	BPF_PROG_TYPE_SCHED_CLS),
2028 	BPF_PROG_SEC("action",		BPF_PROG_TYPE_SCHED_ACT),
2029 	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
2030 	BPF_PROG_SEC("raw_tracepoint/",	BPF_PROG_TYPE_RAW_TRACEPOINT),
2031 	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
2032 	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
2033 	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
2034 	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
2035 	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
2036 	BPF_PROG_SEC("lwt_in",		BPF_PROG_TYPE_LWT_IN),
2037 	BPF_PROG_SEC("lwt_out",		BPF_PROG_TYPE_LWT_OUT),
2038 	BPF_PROG_SEC("lwt_xmit",	BPF_PROG_TYPE_LWT_XMIT),
2039 	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
2040 	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
2041 	BPF_PROG_SEC("sk_msg",		BPF_PROG_TYPE_SK_MSG),
2042 	BPF_SA_PROG_SEC("cgroup/bind4",	BPF_CGROUP_INET4_BIND),
2043 	BPF_SA_PROG_SEC("cgroup/bind6",	BPF_CGROUP_INET6_BIND),
2044 	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
2045 	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
2046 	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
2047 	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
2048 	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
2049 	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
2053 #undef BPF_PROG_SEC_FULL
2054 #undef BPF_S_PROG_SEC
2055 #undef BPF_SA_PROG_SEC
/* Guess a program's type from its section name via prefix match against
 * section_names[] (using the table's precomputed lengths).  Returns the
 * matching table index on success (return lines elided in this excerpt);
 * warns and fails when nothing matches. */
2057 static int bpf_program__identify_section(struct bpf_program *prog)
2061 	if (!prog->section_name)
2064 	for (i = 0; i < ARRAY_SIZE(section_names); i++)
2065 		if (strncmp(prog->section_name, section_names[i].sec,
2066 			    section_names[i].len) == 0)
2070 	pr_warning("failed to guess program type based on section name %s\n",
2071 		   prog->section_name);
/* fd of a map, or -EINVAL for a NULL map pointer. */
2076 int bpf_map__fd(struct bpf_map *map)
2078 	return map ? map->fd : -EINVAL;
/* Pointer to the map's definition, or ERR_PTR(-EINVAL) for NULL. */
2081 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2083 	return map ? &map->def : ERR_PTR(-EINVAL);
/* Map name accessor; NULL for a NULL map (note: NULL here, not an
 * ERR_PTR, unlike the other accessors). */
2086 const char *bpf_map__name(struct bpf_map *map)
2088 	return map ? map->name : NULL;
/* BTF type id of the map's key; 0 when unset or map is NULL. */
2091 uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
2093 	return map ? map->btf_key_type_id : 0;
/* BTF type id of the map's value; 0 when unset or map is NULL. */
2096 uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
2098 	return map ? map->btf_value_type_id : 0;
/* Attach opaque user data to a map, running any previously registered
 * destructor first (mirrors the object/program set_priv helpers). */
2101 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2102 		      bpf_map_clear_priv_t clear_priv)
2108 		if (map->clear_priv)
2109 			map->clear_priv(map, map->priv);
2113 	map->clear_priv = clear_priv;
/* Retrieve user data set via bpf_map__set_priv(); ERR_PTR(-EINVAL) for
 * a NULL map. */
2117 void *bpf_map__priv(struct bpf_map *map)
2119 	return map ? map->priv : ERR_PTR(-EINVAL);
/* Iterator over an object's maps array.  NULL @prev yields the first map;
 * a @prev outside [maps, maps + nr_maps) is rejected as belonging to a
 * different object.  Returns NULL past the end. */
2123 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2126 	struct bpf_map *s, *e;
2128 	if (!obj || !obj->maps)
2132 	e = obj->maps + obj->nr_maps;
2137 	if ((prev < s) || (prev >= e)) {
2138 		pr_warning("error in %s: map handler doesn't belong to object\n",
/* Index of prev via pointer arithmetic; step to the next slot. */
2143 	idx = (prev - obj->maps) + 1;
2144 	if (idx >= obj->nr_maps)
2146 	return &obj->maps[idx];
/* Linear search for a map by exact name over the object's maps.
 * (Not-found return elided in this excerpt.) */
2150 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2152 	struct bpf_map *pos;
2154 	bpf_map__for_each(pos, obj) {
2155 		if (pos->name && !strcmp(pos->name, name))
/* Find the map whose ELF symbol offset matches @offset; ERR_PTR(-ENOENT)
 * when no map matches. */
2162 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2166 	for (i = 0; i < obj->nr_maps; i++) {
2167 		if (obj->maps[i].offset == offset)
2168 			return &obj->maps[i];
2170 	return ERR_PTR(-ENOENT);
/* Decode an ERR_PTR-style return from the libbpf APIs into a negative
 * errno (0 for a valid pointer — that branch is elided in this excerpt). */
2173 long libbpf_get_error(const void *ptr)
2176 		return PTR_ERR(ptr);
/* Back-compat convenience wrapper: build a minimal zeroed
 * bpf_prog_load_attr for (@file, @type) and delegate to
 * bpf_prog_load_xattr(). */
2180 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2181 		  struct bpf_object **pobj, int *prog_fd)
2183 	struct bpf_prog_load_attr attr;
2185 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2187 	attr.prog_type = type;
2188 	attr.expected_attach_type = 0;
2190 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
/* One-shot open+load helper.  Opens @attr->file, assigns each program its
 * type/attach type (guessing from the ELF section name when the caller
 * passed BPF_PROG_TYPE_UNSPEC), propagates @attr->ifindex to programs and
 * maps for offload, loads the object, and returns the object plus the fd
 * of the first non-".text" program.  On any failure the object is closed
 * before returning. */
2193 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2194 			struct bpf_object **pobj, int *prog_fd)
2196 	struct bpf_program *prog, *first_prog = NULL;
2197 	enum bpf_attach_type expected_attach_type;
2198 	enum bpf_prog_type prog_type;
2199 	struct bpf_object *obj;
2200 	struct bpf_map *map;
/* Kernel version is only required for types that need it. */
2209 	obj = __bpf_object__open(attr->file, NULL, 0,
2210 				 bpf_prog_type__needs_kver(attr->prog_type));
2211 	if (IS_ERR_OR_NULL(obj))
2214 	bpf_object__for_each_program(prog, obj) {
2216 		 * If type is not specified, try to guess it based on
2219 		prog_type = attr->prog_type;
2220 		prog->prog_ifindex = attr->ifindex;
2221 		expected_attach_type = attr->expected_attach_type;
2222 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2223 			section_idx = bpf_program__identify_section(prog);
2224 			if (section_idx < 0) {
2225 				bpf_object__close(obj);
2228 			prog_type = section_names[section_idx].prog_type;
2229 			expected_attach_type =
2230 				section_names[section_idx].expected_attach_type;
2233 		bpf_program__set_type(prog, prog_type);
2234 		bpf_program__set_expected_attach_type(prog,
2235 						      expected_attach_type);
/* Remember the first real program, skipping the .text (sub-program)
 * section. */
2237 		if (prog->idx != obj->efile.text_shndx && !first_prog)
2241 	bpf_map__for_each(map, obj) {
2242 		map->map_ifindex = attr->ifindex;
2246 		pr_warning("object file doesn't contain bpf program\n");
2247 		bpf_object__close(obj);
2251 	err = bpf_object__load(obj);
2253 		bpf_object__close(obj);
2258 	*prog_fd = bpf_program__fd(first_prog);
2262 enum bpf_perf_event_ret
2263 bpf_perf_event_read_simple(void *mem, unsigned long size,
2264 unsigned long page_size, void **buf, size_t *buf_len,
2265 bpf_perf_event_print_t fn, void *priv)
2267 volatile struct perf_event_mmap_page *header = mem;
2268 __u64 data_tail = header->data_tail;
2269 __u64 data_head = header->data_head;
2270 void *base, *begin, *end;
2273 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2274 if (data_head == data_tail)
2275 return LIBBPF_PERF_EVENT_CONT;
2277 base = ((char *)header) + page_size;
2279 begin = base + data_tail % size;
2280 end = base + data_head % size;
2282 while (begin != end) {
2283 struct perf_event_header *ehdr;
2286 if (begin + ehdr->size > base + size) {
2287 long len = base + size - begin;
2289 if (*buf_len < ehdr->size) {
2291 *buf = malloc(ehdr->size);
2293 ret = LIBBPF_PERF_EVENT_ERROR;
2296 *buf_len = ehdr->size;
2299 memcpy(*buf, begin, len);
2300 memcpy(*buf + len, base, ehdr->size - len);
2301 ehdr = (void *)*buf;
2302 begin = base + ehdr->size - len;
2303 } else if (begin + ehdr->size == base + size) {
2306 begin += ehdr->size;
2309 ret = fn(ehdr, priv);
2310 if (ret != LIBBPF_PERF_EVENT_CONT)
2313 data_tail += ehdr->size;
2316 __sync_synchronize(); /* smp_mb() */
2317 header->data_tail = data_tail;