2 * Copyright (C) 2017 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 /* Author: Jakub Kicinski <kubakici@wp.pl> */
44 #include <sys/types.h>
/* Table mapping BPF_PROG_TYPE_* enum values to short human-readable names,
 * indexed directly by program type (designated initializers).
 * NOTE(review): the closing "};" is not visible in this view -- the source
 * extraction appears truncated here.
 */
53 static const char * const prog_type_name[] = {
54 [BPF_PROG_TYPE_UNSPEC] = "unspec",
55 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
56 [BPF_PROG_TYPE_KPROBE] = "kprobe",
57 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
58 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
59 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
60 [BPF_PROG_TYPE_XDP] = "xdp",
61 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
62 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
63 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
64 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
65 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
66 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
67 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
68 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
69 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
/* Format a program load time (nsecs since boot) as a wall-clock string in
 * buf.  Wall-clock seconds are derived from CLOCK_REALTIME minus
 * CLOCK_BOOTTIME; on any clock or localtime_r failure it falls back to
 * printing the raw value in whole seconds.
 * NOTE(review): this view is truncated -- the "struct tm load_tm"
 * declaration, early returns and closing braces are not visible.
 */
72 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
74 struct timespec real_time_ts, boot_time_ts;
75 time_t wallclock_secs;
80 if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
81 clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
82 perror("Can't read clocks");
83 snprintf(buf, size, "%llu", nsecs / 1000000000); /* fallback: raw seconds */
87 wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
90 if (!localtime_r(&wallclock_secs, &load_tm)) {
91 snprintf(buf, size, "%llu", nsecs / 1000000000); /* fallback: raw seconds */
95 strftime(buf, size, "%b %d/%H:%M", &load_tm);
/* Walk all loaded BPF programs via bpf_prog_get_next_id() and return an fd
 * for the first program whose tag matches the given BPF_TAG_SIZE-byte tag.
 * NOTE(review): truncated view -- loop construct, id/fd/err declarations,
 * error-path returns and closing braces are not visible here.
 */
98 static int prog_fd_by_tag(unsigned char *tag)
100 struct bpf_prog_info info = {};
101 __u32 len = sizeof(info);
107 err = bpf_prog_get_next_id(id, &id);
109 p_err("%s", strerror(errno));
113 fd = bpf_prog_get_fd_by_id(id);
115 p_err("can't get prog by id (%u): %s",
116 id, strerror(errno));
120 err = bpf_obj_get_info_by_fd(fd, &info, &len);
122 p_err("can't get prog info (%u): %s",
123 id, strerror(errno));
128 if (!memcmp(tag, info.tag, BPF_TAG_SIZE))
/* Parse a PROG specifier from the argv stream and return a program fd.
 * Accepts "id ID" (numeric lookup), "tag TAG" (hex tag bytes, delegated to
 * prog_fd_by_tag()) or "pinned PATH" (bpffs pinned object).  Consumes the
 * arguments it parses; prints an error and presumably returns a negative
 * value on bad input.
 * NOTE(review): truncated view -- NEXT()/argument-advance calls, variable
 * declarations and several returns are not visible here.
 */
135 int prog_parse_fd(int *argc, char ***argv)
139 if (is_prefix(**argv, "id")) {
145 id = strtoul(**argv, &endptr, 0);
147 p_err("can't parse %s as ID", **argv);
152 fd = bpf_prog_get_fd_by_id(id);
154 p_err("get by id (%u): %s", id, strerror(errno));
156 } else if (is_prefix(**argv, "tag")) {
157 unsigned char tag[BPF_TAG_SIZE];
161 if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
162 tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
164 p_err("can't parse tag");
169 return prog_fd_by_tag(tag);
170 } else if (is_prefix(**argv, "pinned")) {
178 return open_obj_pinned_any(path, BPF_OBJ_PROG);
181 p_err("expected 'id', 'tag' or 'pinned', got: '%s'?", **argv);
/* Query the map IDs used by the program behind fd (up to num_maps of them,
 * via bpf_obj_get_info_by_fd with a map_ids buffer) and print them --
 * as a JSON array in JSON mode, comma-separated in plain mode.
 * NOTE(review): truncated view -- err/i declarations, the json_output
 * branch and closing braces are not visible.  map_ids is a VLA sized by
 * the caller-supplied num_maps.
 */
185 static void show_prog_maps(int fd, u32 num_maps)
187 struct bpf_prog_info info = {};
188 __u32 len = sizeof(info);
189 __u32 map_ids[num_maps];
193 info.nr_map_ids = num_maps;
194 info.map_ids = ptr_to_u64(map_ids);
196 err = bpf_obj_get_info_by_fd(fd, &info, &len);
197 if (err || !info.nr_map_ids)
201 jsonw_name(json_wtr, "map_ids");
202 jsonw_start_array(json_wtr);
203 for (i = 0; i < info.nr_map_ids; i++)
204 jsonw_uint(json_wtr, map_ids[i]);
205 jsonw_end_array(json_wtr);
208 for (i = 0; i < info.nr_map_ids; i++)
209 printf("%u%s", map_ids[i],
210 i == info.nr_map_ids - 1 ? "" : ",");
/* Emit one program's info as a JSON object: id, type (name when known,
 * numeric otherwise), name, tag, offload device, load time + uid, xlated
 * and jited sizes, memlock bytes, map ids, and any bpffs pin paths.
 * NOTE(review): truncated view -- buf/memlock declarations, several else
 * branches and closing braces are not visible.
 */
214 static void print_prog_json(struct bpf_prog_info *info, int fd)
218 jsonw_start_object(json_wtr);
219 jsonw_uint_field(json_wtr, "id", info->id);
220 if (info->type < ARRAY_SIZE(prog_type_name))
221 jsonw_string_field(json_wtr, "type",
222 prog_type_name[info->type]);
224 jsonw_uint_field(json_wtr, "type", info->type);
227 jsonw_string_field(json_wtr, "name", info->name);
229 jsonw_name(json_wtr, "tag");
230 jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
231 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
232 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
234 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
236 if (info->load_time) {
239 print_boot_time(info->load_time, buf, sizeof(buf));
241 /* Piggy back on load_time, since 0 uid is a valid one */
242 jsonw_string_field(json_wtr, "loaded_at", buf);
243 jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
246 jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
248 if (info->jited_prog_len) {
249 jsonw_bool_field(json_wtr, "jited", true);
250 jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
252 jsonw_bool_field(json_wtr, "jited", false);
255 memlock = get_fdinfo(fd, "memlock");
257 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
260 if (info->nr_map_ids)
261 show_prog_maps(fd, info->nr_map_ids);
263 if (!hash_empty(prog_table.table)) {
264 struct pinned_obj *obj;
266 jsonw_name(json_wtr, "pinned");
267 jsonw_start_array(json_wtr);
/* hash_for_each_possible walks one bucket; the id check filters collisions */
268 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
269 if (obj->id == info->id)
270 jsonw_string(json_wtr, obj->path);
272 jsonw_end_array(json_wtr);
275 jsonw_end_object(json_wtr);
/* Plain-text counterpart of print_prog_json(): print one program's id,
 * type, name, tag, offload device, load time + uid, xlated/jited sizes,
 * memlock and pin paths to stdout.
 * NOTE(review): truncated view -- buf/memlock declarations, some else
 * branches and closing braces are not visible.
 */
278 static void print_prog_plain(struct bpf_prog_info *info, int fd)
282 printf("%u: ", info->id);
283 if (info->type < ARRAY_SIZE(prog_type_name))
284 printf("%s ", prog_type_name[info->type]);
286 printf("type %u ", info->type);
289 printf("name %s ", info->name);
292 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
293 print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
296 if (info->load_time) {
299 print_boot_time(info->load_time, buf, sizeof(buf));
301 /* Piggy back on load_time, since 0 uid is a valid one */
302 printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
305 printf("\txlated %uB", info->xlated_prog_len);
307 if (info->jited_prog_len)
308 printf(" jited %uB", info->jited_prog_len);
310 printf(" not jited");
312 memlock = get_fdinfo(fd, "memlock");
314 printf(" memlock %sB", memlock);
317 if (info->nr_map_ids)
318 show_prog_maps(fd, info->nr_map_ids);
320 if (!hash_empty(prog_table.table)) {
321 struct pinned_obj *obj;
/* hash_for_each_possible walks one bucket; the id check filters collisions */
324 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
325 if (obj->id == info->id)
326 printf("\tpinned %s\n", obj->path);
/* Fetch bpf_prog_info for fd and dispatch to the JSON or plain printer.
 * NOTE(review): truncated view -- err declaration, the json_output
 * condition, return statements and closing brace are not visible.
 */
333 static int show_prog(int fd)
335 struct bpf_prog_info info = {};
336 __u32 len = sizeof(info);
339 err = bpf_obj_get_info_by_fd(fd, &info, &len);
341 p_err("can't get prog info: %s", strerror(errno));
346 print_prog_json(&info, fd);
348 print_prog_plain(&info, fd);
/* "bpftool prog show [PROG]": with an argument, show just that program;
 * with none, iterate every loaded program via bpf_prog_get_next_id(),
 * opening each by id and printing it (JSON output wrapped in an array).
 * ENOENT from get_next_id terminates the walk normally; EINVAL suggests a
 * pre-BPF_PROG_GET_NEXT_ID kernel.
 * NOTE(review): truncated view -- the id/fd/err declarations, the
 * argc check, the iteration loop construct, per-program show_prog() call
 * and returns are not visible.
 */
353 static int do_show(int argc, char **argv)
360 build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
363 fd = prog_parse_fd(&argc, &argv);
367 return show_prog(fd);
374 jsonw_start_array(json_wtr);
376 err = bpf_prog_get_next_id(id, &id);
378 if (errno == ENOENT) {
382 p_err("can't get next program: %s%s", strerror(errno),
383 errno == EINVAL ? " -- kernel too old?" : "");
388 fd = bpf_prog_get_fd_by_id(id);
392 p_err("can't get prog by id (%u): %s",
393 id, strerror(errno));
405 jsonw_end_array(json_wtr);
/* State for symbolizing instruction dumps: a sorted array of kernel
 * symbols read from /proc/kallsyms, the address of __bpf_call_base (used
 * to resolve helper call targets), and a scratch buffer for formatting.
 * NOTE(review): truncated view -- the struct keywords/braces for
 * kernel_sym and dump_data are not fully visible here.
 */
410 #define SYM_MAX_NAME 256
413 unsigned long address;
414 char name[SYM_MAX_NAME];
418 unsigned long address_call_base;
419 struct kernel_sym *sym_mapping;
421 char scratch_buff[SYM_MAX_NAME];
/* qsort/bsearch comparator ordering kernel_sym entries by address.
 * NOTE(review): subtracting unsigned longs and returning int can
 * misbehave for widely separated addresses -- worth confirming against
 * the upstream version.  Braces are missing from this view.
 */
424 static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
426 return ((struct kernel_sym *)sym_a)->address -
427 ((struct kernel_sym *)sym_b)->address;
/* Populate dd->sym_mapping from /proc/kallsyms: grow the array one entry
 * per line (realloc), parse "address type name", record __bpf_call_base's
 * address, then sort by address for later bsearch lookups.
 * NOTE(review): truncated view -- buff/address/tmp/fp declarations, the
 * read loop construct, fclose(), sym_count increment and closing braces
 * are not visible.
 */
430 static void kernel_syms_load(struct dump_data *dd)
432 struct kernel_sym *sym;
437 fp = fopen("/proc/kallsyms", "r");
442 if (!fgets(buff, sizeof(buff), fp))
/* realloc via a temporary so the old array is not leaked on failure */
444 tmp = realloc(dd->sym_mapping,
445 (dd->sym_count + 1) *
446 sizeof(*dd->sym_mapping));
449 free(dd->sym_mapping);
450 dd->sym_mapping = NULL;
454 dd->sym_mapping = tmp;
455 sym = &dd->sym_mapping[dd->sym_count];
456 if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
458 sym->address = (unsigned long)address;
459 if (!strcmp(sym->name, "__bpf_call_base")) {
460 dd->address_call_base = sym->address;
461 /* sysctl kernel.kptr_restrict was set */
471 qsort(dd->sym_mapping, dd->sym_count,
472 sizeof(*dd->sym_mapping), kernel_syms_cmp);
/* Release the symbol array allocated by kernel_syms_load().
 * NOTE(review): braces not visible in this truncated view.
 */
475 static void kernel_syms_destroy(struct dump_data *dd)
477 free(dd->sym_mapping);
/* Binary-search the sorted symbol array for an exact address match;
 * returns NULL when no symbols were loaded or the address is not found.
 * NOTE(review): truncated view -- the key's address initializer and the
 * second parameter of this function are not visible.
 */
480 static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
483 struct kernel_sym sym = {
487 return dd->sym_mapping ?
488 bsearch(&sym, dd->sym_mapping, dd->sym_count,
489 sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
/* printf-style print callback handed to the kernel disassembler via
 * bpf_insn_cbs (plain-text output path).
 * NOTE(review): the body is entirely missing from this view -- presumably
 * a vprintf of fmt; confirm against upstream.
 */
492 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
/* Format a BPF-to-BPF (pc-relative) call target into dd->scratch_buff:
 * "+off#symname" when the target symbol is known, "+off#0xADDR" otherwise.
 * Returns the scratch buffer (overwritten on each call).
 * NOTE(review): truncated view -- the if/else around the two snprintf
 * calls and the opening brace are not visible.
 */
501 static const char *print_call_pcrel(struct dump_data *dd,
502 struct kernel_sym *sym,
503 unsigned long address,
504 const struct bpf_insn *insn)
507 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
508 "%+d#%s", insn->off, sym->name);
510 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
511 "%+d#0x%lx", insn->off, address);
512 return dd->scratch_buff;
/* Format a helper-call target into dd->scratch_buff, by symbol name when
 * resolved, by address otherwise.  Returns the scratch buffer.
 * NOTE(review): truncated view -- the format strings/arguments of both
 * snprintf calls and the surrounding if/else are not visible.
 */
515 static const char *print_call_helper(struct dump_data *dd,
516 struct kernel_sym *sym,
517 unsigned long address)
520 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
523 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
525 return dd->scratch_buff;
/* Disassembler callback for call instructions.  Computes the absolute
 * target as __bpf_call_base + imm, looks it up in the symbol table, then
 * delegates: BPF_PSEUDO_CALL (bpf-to-bpf) -> print_call_pcrel(),
 * otherwise (helper call) -> print_call_helper().
 */
528 static const char *print_call(void *private_data,
529 const struct bpf_insn *insn)
531 struct dump_data *dd = private_data;
532 unsigned long address = dd->address_call_base + insn->imm;
533 struct kernel_sym *sym;
535 sym = kernel_syms_search(dd, address);
536 if (insn->src_reg == BPF_PSEUDO_CALL)
537 return print_call_pcrel(dd, sym, address, insn);
539 return print_call_helper(dd, sym, address);
/* Disassembler callback for 64-bit immediates: map-fd loads render as
 * "map[id:N]", anything else as the raw hex value.  Returns the shared
 * scratch buffer.
 * NOTE(review): truncated view -- the full_imm parameter declaration and
 * braces are not visible.
 */
542 static const char *print_imm(void *private_data,
543 const struct bpf_insn *insn,
546 struct dump_data *dd = private_data;
548 if (insn->src_reg == BPF_PSEUDO_MAP_FD)
549 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
550 "map[id:%u]", insn->imm);
552 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
553 "0x%llx", (unsigned long long)full_imm);
554 return dd->scratch_buff;
/* Plain-text disassembly of a translated-instruction buffer.  Walks the
 * buffer one struct bpf_insn at a time, printing each via
 * print_bpf_insn() with the print_insn/print_call callbacks; when
 * opcodes is set, also hex-dumps the raw 8-byte instruction (and the
 * second half of a BPF_LD|BPF_IMM|BPF_DW double instruction).
 * NOTE(review): truncated view -- the cbs private_data/cb_imm members,
 * the double_insn skip logic and closing braces are not visible.  The
 * "i < len - 1" guard compares an instruction index against a byte
 * length -- confirm against upstream.
 */
557 static void dump_xlated_plain(struct dump_data *dd, void *buf,
558 unsigned int len, bool opcodes)
560 const struct bpf_insn_cbs cbs = {
561 .cb_print = print_insn,
562 .cb_call = print_call,
566 struct bpf_insn *insn = buf;
567 bool double_insn = false;
570 for (i = 0; i < len / sizeof(*insn); i++) {
576 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
579 print_bpf_insn(&cbs, NULL, insn + i, true);
583 fprint_hex(stdout, insn + i, 8, " ");
584 if (double_insn && i < len - 1) {
586 fprint_hex(stdout, insn + i + 1, 8, " ");
/* JSON-mode print callback for the disassembler: strips the trailing
 * character (presumably '\n') from fmt into a local copy, then emits the
 * formatted string as a quoted JSON value.
 * NOTE(review): truncated view -- chomped_fmt declaration, va_start/
 * va_end and braces are not visible.
 */
593 static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
595 unsigned int l = strlen(fmt);
601 strncpy(chomped_fmt, fmt, l - 1);
602 chomped_fmt[l - 1] = '\0';
604 jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
/* JSON counterpart of dump_xlated_plain(): emits an array of objects,
 * one per instruction, each with a "disasm" string and -- when opcodes
 * is set -- an "opcodes" object breaking out code/src_reg/dst_reg/off/imm
 * fields (imm covers 12 bytes for a BPF_LD|BPF_IMM|BPF_DW pair,
 * presumably, vs 4 otherwise -- the byte counts are not visible here).
 * NOTE(review): truncated view -- cbs private_data/cb_imm members, the
 * double_insn skip, the opcodes condition and closing braces are missing.
 * As in the plain variant, "i < len - 1" mixes an instruction index with
 * a byte length -- confirm against upstream.
 */
608 static void dump_xlated_json(struct dump_data *dd, void *buf,
609 unsigned int len, bool opcodes)
611 const struct bpf_insn_cbs cbs = {
612 .cb_print = print_insn_json,
613 .cb_call = print_call,
617 struct bpf_insn *insn = buf;
618 bool double_insn = false;
621 jsonw_start_array(json_wtr);
622 for (i = 0; i < len / sizeof(*insn); i++) {
627 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
629 jsonw_start_object(json_wtr);
630 jsonw_name(json_wtr, "disasm");
631 print_bpf_insn(&cbs, NULL, insn + i, true);
634 jsonw_name(json_wtr, "opcodes");
635 jsonw_start_object(json_wtr);
637 jsonw_name(json_wtr, "code");
638 jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
640 jsonw_name(json_wtr, "src_reg");
641 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
643 jsonw_name(json_wtr, "dst_reg");
644 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
646 jsonw_name(json_wtr, "off");
647 print_hex_data_json((uint8_t *)(&insn[i].off), 2);
649 jsonw_name(json_wtr, "imm");
650 if (double_insn && i < len - 1)
651 print_hex_data_json((uint8_t *)(&insn[i].imm),
654 print_hex_data_json((uint8_t *)(&insn[i].imm),
656 jsonw_end_object(json_wtr);
658 jsonw_end_object(json_wtr);
660 jsonw_end_array(json_wtr);
/* "bpftool prog dump { xlated | jited } PROG [{ file FILE | opcodes }]".
 * Two-pass info query: first bpf_obj_get_info_by_fd() to learn the dump
 * length, then a second call with a malloc'd buffer of that size to fetch
 * the instructions.  A zero insns pointer after the second call means the
 * kernel withheld the dump (kptr_restrict).  Output goes raw to FILE when
 * "file" was given, otherwise through the jited disassembler or the
 * xlated JSON/plain dumpers (with kallsyms loaded for symbolization).
 * NOTE(review): heavily truncated view -- member_len/member_ptr/fd/err/n
 * declarations, argc/argv advancing, goto-style error cleanup, free(buf),
 * close(fd) and returns are not visible.
 */
663 static int do_dump(int argc, char **argv)
665 struct bpf_prog_info info = {};
666 struct dump_data dd = {};
667 __u32 len = sizeof(info);
668 unsigned int buf_size;
669 char *filepath = NULL;
670 bool opcodes = false;
/* select which info fields (jited vs xlated) drive the dump */
678 if (is_prefix(*argv, "jited")) {
679 member_len = &info.jited_prog_len;
680 member_ptr = &info.jited_prog_insns;
681 } else if (is_prefix(*argv, "xlated")) {
682 member_len = &info.xlated_prog_len;
683 member_ptr = &info.xlated_prog_insns;
685 p_err("expected 'xlated' or 'jited', got: %s", *argv);
693 fd = prog_parse_fd(&argc, &argv);
697 if (is_prefix(*argv, "file")) {
700 p_err("expected file path");
706 } else if (is_prefix(*argv, "opcodes")) {
/* first pass: sizes only */
716 err = bpf_obj_get_info_by_fd(fd, &info, &len);
718 p_err("can't get prog info: %s", strerror(errno));
723 p_info("no instructions returned");
728 buf_size = *member_len;
730 buf = malloc(buf_size);
732 p_err("mem alloc failed");
/* second pass: point the kernel at our buffer and refetch */
737 memset(&info, 0, sizeof(info));
739 *member_ptr = ptr_to_u64(buf);
740 *member_len = buf_size;
742 err = bpf_obj_get_info_by_fd(fd, &info, &len);
745 p_err("can't get prog info: %s", strerror(errno));
749 if (*member_len > buf_size) {
750 p_err("too many instructions returned");
/* NULL insns pointer back from the kernel => dump was suppressed */
754 if ((member_len == &info.jited_prog_len &&
755 info.jited_prog_insns == 0) ||
756 (member_len == &info.xlated_prog_len &&
757 info.xlated_prog_insns == 0)) {
758 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
763 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
765 p_err("can't open file %s: %s", filepath,
770 n = write(fd, buf, *member_len);
772 if (n != *member_len) {
773 p_err("error writing output file: %s",
774 n < 0 ? strerror(errno) : "short write");
778 if (member_len == &info.jited_prog_len) {
779 disasm_print_insn(buf, *member_len, opcodes);
781 kernel_syms_load(&dd);
783 dump_xlated_json(&dd, buf, *member_len, opcodes);
785 dump_xlated_plain(&dd, buf, *member_len, opcodes);
786 kernel_syms_destroy(&dd);
/* "bpftool prog pin PROG FILE": delegate to the generic do_pin_any()
 * using bpf_prog_get_fd_by_id as the id->fd resolver; emit a JSON null
 * on success in JSON mode.
 * NOTE(review): return statement and braces not visible in this view.
 */
798 static int do_pin(int argc, char **argv)
802 err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
803 if (!err && json_output)
804 jsonw_null(json_wtr);
/* "bpftool prog load OBJ FILE": load the ELF object argv[0] via libbpf's
 * bpf_prog_load() (type autodetected), then pin the resulting fd at
 * argv[1].  JSON mode prints null on success.
 * NOTE(review): truncated view -- prog_fd declaration, argc check, error
 * returns and closing brace are not visible.
 */
808 static int do_load(int argc, char **argv)
810 struct bpf_object *obj;
816 if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
817 p_err("failed to load program");
821 if (do_pin_fd(prog_fd, argv[1])) {
822 p_err("failed to pin program");
827 jsonw_null(json_wtr);
/* Print usage for the prog subcommand (JSON mode just emits null).
 * argv[-2] is the already-consumed object name ("prog") -- a deliberate
 * bpftool convention for reconstructing the command line in usage text.
 * NOTE(review): truncated view -- the json_output early-return, the
 * fprintf(stderr, ...) opening and closing braces are not visible.
 */
832 static int do_help(int argc, char **argv)
835 jsonw_null(json_wtr);
840 "Usage: %s %s { show | list } [PROG]\n"
841 " %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
842 " %s %s dump jited PROG [{ file FILE | opcodes }]\n"
843 " %s %s pin PROG FILE\n"
844 " %s %s load OBJ FILE\n"
847 " " HELP_SPEC_PROGRAM "\n"
848 " " HELP_SPEC_OPTIONS "\n"
850 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
851 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
/* Dispatch table for "bpftool prog" subcommands (show/list, dump, pin,
 * load, help), consumed by cmd_select() in do_prog().
 * NOTE(review): the table entries themselves are missing from this view.
 */
856 static const struct cmd cmds[] = {
/* Entry point for the "prog" object: dispatch argv through the cmds
 * table, falling back to do_help for unknown/absent subcommands.
 * NOTE(review): braces not visible in this truncated view.
 */
866 int do_prog(int argc, char **argv)
868 return cmd_select(cmds, argc, argv, do_help);