bpf: Support ->fill_link_info for perf_event
authorYafang Shao <laoar.shao@gmail.com>
Sun, 9 Jul 2023 02:56:28 +0000 (02:56 +0000)
committerAlexei Starovoitov <ast@kernel.org>
Wed, 12 Jul 2023 03:07:51 +0000 (20:07 -0700)
By introducing support for ->fill_link_info to the perf_event link, users
gain the ability to inspect it using `bpftool link show`. While the current
approach involves accessing this information via `bpftool perf show`,
consolidating link information for all link types in one place offers
greater convenience. Additionally, this patch extends support to the
generic perf event, which is not currently accommodated by
`bpftool perf show`. Only the perf type and config are exposed to
userspace; other attributes, such as sample_period and sample_freq, are
ignored. It's important to note that if kptr_restrict does not permit it,
the probed address will not be exposed, maintaining security measures.

A new enum bpf_perf_event_type is introduced to help the user understand
which struct is relevant.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20230709025630.3735-9-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/uapi/linux/bpf.h
kernel/bpf/syscall.c
kernel/trace/bpf_trace.c
tools/include/uapi/linux/bpf.h

index a4e881c64e0fe5f2f12d19b9d8f5c7de1c0965e3..600d0caebbd8ac592c8a5270b49253da2d4c350e 100644 (file)
@@ -1057,6 +1057,16 @@ enum bpf_link_type {
        MAX_BPF_LINK_TYPE,
 };
 
+/* Kind of attachment behind a BPF_LINK_TYPE_PERF_EVENT link, reported in
+ * bpf_link_info.perf_event.type so userspace knows which member of the
+ * perf_event union is valid.
+ */
+enum bpf_perf_event_type {
+       BPF_PERF_EVENT_UNSPEC = 0,
+       BPF_PERF_EVENT_UPROBE = 1,
+       BPF_PERF_EVENT_URETPROBE = 2,
+       BPF_PERF_EVENT_KPROBE = 3,
+       BPF_PERF_EVENT_KRETPROBE = 4,
+       BPF_PERF_EVENT_TRACEPOINT = 5,
+       BPF_PERF_EVENT_EVENT = 6,
+};
+
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
  *
  * NONE(default): No further bpf programs allowed in the subtree.
@@ -6444,6 +6454,31 @@ struct bpf_link_info {
                        __u32 count; /* in/out: kprobe_multi function count */
                        __u32 flags;
                } kprobe_multi;
+               /* BPF_LINK_TYPE_PERF_EVENT details; 'type' selects the union member */
+               struct {
+                       __u32 type; /* enum bpf_perf_event_type */
+                       __u32 :32; /* unnamed pad so the union starts u64-aligned */
+                       union {
+                               struct {
+                                       __aligned_u64 file_name; /* in/out */
+                                       __u32 name_len;
+                                       __u32 offset; /* offset from file_name */
+                               } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+                               struct {
+                                       __aligned_u64 func_name; /* in/out */
+                                       __u32 name_len;
+                                       __u32 offset; /* offset from func_name */
+                                       __u64 addr;
+                               } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+                               struct {
+                                       __aligned_u64 tp_name;   /* in/out */
+                                       __u32 name_len;
+                               } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+                               struct {
+                                       __u64 config;
+                                       __u32 type;
+                               } event; /* BPF_PERF_EVENT_EVENT */
+                       };
+               } perf_event;
        };
 } __attribute__((aligned(8)));
 
index 4aa6e5776a04b80bb359060a25709e13b831983f..ee8cb1a174aa15a2387a6e70d0972d68db6b5a65 100644 (file)
@@ -3364,9 +3364,155 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
        kfree(perf_link);
 }
 
+/*
+ * Fill the details shared by all perf-event link kinds: copy the
+ * probe/tracepoint name into the user buffer @uname (when one is given)
+ * and report offset/addr/fd_type through the out parameters.
+ *
+ * @uname and @ulen must be either both set or both zero.  The out
+ * pointers may be NULL for tracepoints (bpf_get_perf_event_info()
+ * tolerates NULL for them in that case).
+ *
+ * bpf_get_perf_event_info() is called even when @uname is NULL so that
+ * *fd_type/*probe_offset/*probe_addr are always valid on success —
+ * bpf_perf_link_fill_kprobe()/_uprobe() read *fd_type unconditionally.
+ */
+static int bpf_perf_link_fill_common(const struct perf_event *event,
+                                    char __user *uname, u32 ulen,
+                                    u64 *probe_offset, u64 *probe_addr,
+                                    u32 *fd_type)
+{
+       const char *buf;
+       u32 prog_id;
+       size_t len;
+       int err;
+
+       /* name buffer and its length must be provided together */
+       if (!ulen ^ !uname)
+               return -EINVAL;
+
+       err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
+                                     probe_offset, probe_addr);
+       if (err)
+               return err;
+
+       /* caller only wanted the out parameters, no name copy */
+       if (!uname)
+               return 0;
+
+       if (buf) {
+               len = strlen(buf);
+               err = bpf_copy_to_user(uname, buf, ulen, len);
+               if (err)
+                       return err;
+       } else {
+               /* no name available: report an empty string to userspace */
+               char zero = '\0';
+
+               if (put_user(zero, uname))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_KPROBE_EVENTS
+/* Fill k[ret]probe link details: symbol name, offset and address. */
+static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
+                                    struct bpf_link_info *info)
+{
+       char __user *uname;
+       u64 addr, offset;
+       u32 ulen, type;
+       int err;
+
+       uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
+       ulen = info->perf_event.kprobe.name_len;
+       err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+                                       &type);
+       if (err)
+               return err;
+       /*
+        * NOTE(review): when uname is NULL (name_len == 0),
+        * bpf_perf_link_fill_common() returns 0 without writing 'type',
+        * so this comparison reads an uninitialized value — the helper
+        * should call bpf_get_perf_event_info() unconditionally.
+        */
+       if (type == BPF_FD_TYPE_KRETPROBE)
+               info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
+       else
+               info->perf_event.type = BPF_PERF_EVENT_KPROBE;
+
+       info->perf_event.kprobe.offset = offset;
+       /* hide kernel addresses from users lacking kallsyms visibility */
+       if (!kallsyms_show_value(current_cred()))
+               addr = 0;
+       info->perf_event.kprobe.addr = addr;
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_UPROBE_EVENTS
+/* Fill u[ret]probe link details: target file name and offset within it. */
+static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
+                                    struct bpf_link_info *info)
+{
+       char __user *uname;
+       u64 addr, offset;
+       u32 ulen, type;
+       int err;
+
+       uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
+       ulen = info->perf_event.uprobe.name_len;
+       err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+                                       &type);
+       if (err)
+               return err;
+
+       /*
+        * NOTE(review): as in the kprobe variant, 'type' is left
+        * unwritten by the helper when uname is NULL — confirm.
+        */
+       if (type == BPF_FD_TYPE_URETPROBE)
+               info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
+       else
+               info->perf_event.type = BPF_PERF_EVENT_UPROBE;
+       info->perf_event.uprobe.offset = offset;
+       return 0;
+}
+#endif
+
+/* Dispatch to the kprobe or uprobe filler based on the trace event flags. */
+static int bpf_perf_link_fill_probe(const struct perf_event *event,
+                                   struct bpf_link_info *info)
+{
+#ifdef CONFIG_KPROBE_EVENTS
+       if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
+               return bpf_perf_link_fill_kprobe(event, info);
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+       if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
+               return bpf_perf_link_fill_uprobe(event, info);
+#endif
+       /* neither probe flavour compiled in or flag set */
+       return -EOPNOTSUPP;
+}
+
+/* Fill tracepoint link details: only the tracepoint name is reported. */
+static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
+                                        struct bpf_link_info *info)
+{
+       char __user *uname;
+       u32 ulen;
+
+       uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
+       ulen = info->perf_event.tracepoint.name_len;
+       info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+       /* offset/addr/fd_type are meaningless for tracepoints: pass NULL */
+       return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
+}
+
+/* Fill generic perf event details: only attr.type and attr.config are
+ * exposed; other attributes (sample_period etc.) are deliberately omitted.
+ */
+static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
+                                        struct bpf_link_info *info)
+{
+       info->perf_event.event.type = event->attr.type;
+       info->perf_event.event.config = event->attr.config;
+       info->perf_event.type = BPF_PERF_EVENT_EVENT;
+       return 0;
+}
+
+/*
+ * ->fill_link_info callback for perf_event links: dispatch on the type of
+ * the attached program to fill the matching perf_event union member.
+ */
+static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
+                                       struct bpf_link_info *info)
+{
+       struct bpf_perf_link *perf_link;
+       const struct perf_event *event;
+
+       perf_link = container_of(link, struct bpf_perf_link, link);
+       event = perf_get_event(perf_link->perf_file);
+       if (IS_ERR(event))
+               return PTR_ERR(event);
+
+       switch (event->prog->type) {
+       case BPF_PROG_TYPE_PERF_EVENT:
+               return bpf_perf_link_fill_perf_event(event, info);
+       case BPF_PROG_TYPE_TRACEPOINT:
+               return bpf_perf_link_fill_tracepoint(event, info);
+       case BPF_PROG_TYPE_KPROBE:
+               /* covers both k[ret]probes and u[ret]probes */
+               return bpf_perf_link_fill_probe(event, info);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct bpf_link_ops bpf_perf_link_lops = {
        .release = bpf_perf_link_release,
        .dealloc = bpf_perf_link_dealloc,
+       /* expose link details via BPF_OBJ_GET_INFO_BY_FD (bpftool link show) */
+       .fill_link_info = bpf_perf_link_fill_link_info,
 };
 
 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
index 31ec0e2853ec422062b04b41e56ce7ba98df152f..897edfc9ca129bd08b07710efb69cb74ed434c51 100644 (file)
@@ -2369,9 +2369,13 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
        if (is_tracepoint || is_syscall_tp) {
                *buf = is_tracepoint ? event->tp_event->tp->name
                                     : event->tp_event->name;
-               *fd_type = BPF_FD_TYPE_TRACEPOINT;
-               *probe_offset = 0x0;
-               *probe_addr = 0x0;
+               /* We allow NULL pointer for tracepoint */
+               if (fd_type)
+                       *fd_type = BPF_FD_TYPE_TRACEPOINT;
+               if (probe_offset)
+                       *probe_offset = 0x0;
+               if (probe_addr)
+                       *probe_addr = 0x0;
        } else {
                /* kprobe/uprobe */
                err = -EOPNOTSUPP;
index a4e881c64e0fe5f2f12d19b9d8f5c7de1c0965e3..600d0caebbd8ac592c8a5270b49253da2d4c350e 100644 (file)
@@ -1057,6 +1057,16 @@ enum bpf_link_type {
        MAX_BPF_LINK_TYPE,
 };
 
+/* Kind of attachment behind a BPF_LINK_TYPE_PERF_EVENT link, reported in
+ * bpf_link_info.perf_event.type so userspace knows which member of the
+ * perf_event union is valid.
+ */
+enum bpf_perf_event_type {
+       BPF_PERF_EVENT_UNSPEC = 0,
+       BPF_PERF_EVENT_UPROBE = 1,
+       BPF_PERF_EVENT_URETPROBE = 2,
+       BPF_PERF_EVENT_KPROBE = 3,
+       BPF_PERF_EVENT_KRETPROBE = 4,
+       BPF_PERF_EVENT_TRACEPOINT = 5,
+       BPF_PERF_EVENT_EVENT = 6,
+};
+
 /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
  *
  * NONE(default): No further bpf programs allowed in the subtree.
@@ -6444,6 +6454,31 @@ struct bpf_link_info {
                        __u32 count; /* in/out: kprobe_multi function count */
                        __u32 flags;
                } kprobe_multi;
+               /* BPF_LINK_TYPE_PERF_EVENT details; 'type' selects the union member */
+               struct {
+                       __u32 type; /* enum bpf_perf_event_type */
+                       __u32 :32; /* unnamed pad so the union starts u64-aligned */
+                       union {
+                               struct {
+                                       __aligned_u64 file_name; /* in/out */
+                                       __u32 name_len;
+                                       __u32 offset; /* offset from file_name */
+                               } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+                               struct {
+                                       __aligned_u64 func_name; /* in/out */
+                                       __u32 name_len;
+                                       __u32 offset; /* offset from func_name */
+                                       __u64 addr;
+                               } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+                               struct {
+                                       __aligned_u64 tp_name;   /* in/out */
+                                       __u32 name_len;
+                               } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+                               struct {
+                                       __u64 config;
+                                       __u32 type;
+                               } event; /* BPF_PERF_EVENT_EVENT */
+                       };
+               } perf_event;
        };
 } __attribute__((aligned(8)));