perf/bpf: Reorder bpf_overflow_handler() ahead of __perf_event_overflow()
author		Kyle Huey <me@kylehuey.com>
		Fri, 12 Apr 2024 01:50:13 +0000 (18:50 -0700)
committer	Ingo Molnar <mingo@kernel.org>
		Fri, 12 Apr 2024 09:49:48 +0000 (11:49 +0200)
Move bpf_overflow_handler() and the perf_event_set_bpf_handler()/perf_event_free_bpf_handler()
helpers above __perf_event_overflow(). This is purely a code move; it will allow
__perf_event_overflow() to call bpf_overflow_handler() directly in a follow-up change,
without needing a forward declaration.
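
For context, a rough sketch of what the new ordering enables is shown below. It is
illustrative only: the actual call site, and the change that makes bpf_overflow_handler()
return a value, belong to a later patch in this series, not to this one (only the
surrounding function signature matches the existing __perf_event_overflow()).

	static int __perf_event_overflow(struct perf_event *event,
					 int throttle, struct perf_sample_data *data,
					 struct pt_regs *regs)
	{
		int ret = __perf_event_account_interrupt(event, throttle);

		/*
		 * Illustrative only: with bpf_overflow_handler() defined above
		 * this function, it could be invoked directly here instead of
		 * being installed as event->overflow_handler (this assumes the
		 * handler is later changed to return a value).
		 */
		if (event->prog && !bpf_overflow_handler(event, data, regs))
			return ret;

		/* ... remainder of the generic overflow handling ... */
		return ret;
	}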

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20240412015019.7060-2-khuey@kylehuey.com
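
For reference, the relocated perf_event_set_bpf_handler() is reached from the
PERF_EVENT_IOC_SET_BPF ioctl for non-tracepoint events. A minimal userspace sketch of
that path follows; error handling is omitted, and load_perf_event_prog() is a
hypothetical wrapper around BPF_PROG_LOAD for a BPF_PROG_TYPE_PERF_EVENT program.

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hypothetical helper: loads a BPF_PROG_TYPE_PERF_EVENT program, returns its fd. */
	extern int load_perf_event_prog(void);

	int main(void)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 100000,
		};
		int event_fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		int prog_fd  = load_perf_event_prog();

		/* For a hardware event like this, the kernel side lands in
		 * perf_event_set_bpf_handler(). */
		ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
		return 0;
	}
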
kernel/events/core.c

index fd94e45a9d869294c2e3add556e4e712e6a883a5..ca0a90648fe6fd5027d273c6b44b83094df93200 100644
@@ -9563,6 +9563,98 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r
        return true;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+static void bpf_overflow_handler(struct perf_event *event,
+                                struct perf_sample_data *data,
+                                struct pt_regs *regs)
+{
+       struct bpf_perf_event_data_kern ctx = {
+               .data = data,
+               .event = event,
+       };
+       struct bpf_prog *prog;
+       int ret = 0;
+
+       ctx.regs = perf_arch_bpf_user_pt_regs(regs);
+       if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
+               goto out;
+       rcu_read_lock();
+       prog = READ_ONCE(event->prog);
+       if (prog) {
+               perf_prepare_sample(data, event, regs);
+               ret = bpf_prog_run(prog, &ctx);
+       }
+       rcu_read_unlock();
+out:
+       __this_cpu_dec(bpf_prog_active);
+       if (!ret)
+               return;
+
+       event->orig_overflow_handler(event, data, regs);
+}
+
+static int perf_event_set_bpf_handler(struct perf_event *event,
+                                     struct bpf_prog *prog,
+                                     u64 bpf_cookie)
+{
+       if (event->overflow_handler_context)
+               /* hw breakpoint or kernel counter */
+               return -EINVAL;
+
+       if (event->prog)
+               return -EEXIST;
+
+       if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
+               return -EINVAL;
+
+       if (event->attr.precise_ip &&
+           prog->call_get_stack &&
+           (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
+            event->attr.exclude_callchain_kernel ||
+            event->attr.exclude_callchain_user)) {
+               /*
+                * On perf_event with precise_ip, calling bpf_get_stack()
+                * may trigger unwinder warnings and occasional crashes.
+                * bpf_get_[stack|stackid] works around this issue by using
+                * callchain attached to perf_sample_data. If the
+                * perf_event does not have a full (kernel and user) callchain
+                * attached to perf_sample_data, do not allow attaching a BPF
+                * program that calls bpf_get_[stack|stackid].
+                */
+               return -EPROTO;
+       }
+
+       event->prog = prog;
+       event->bpf_cookie = bpf_cookie;
+       event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
+       WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
+       return 0;
+}
+
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+       struct bpf_prog *prog = event->prog;
+
+       if (!prog)
+               return;
+
+       WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
+       event->prog = NULL;
+       bpf_prog_put(prog);
+}
+#else
+static int perf_event_set_bpf_handler(struct perf_event *event,
+                                     struct bpf_prog *prog,
+                                     u64 bpf_cookie)
+{
+       return -EOPNOTSUPP;
+}
+
+static void perf_event_free_bpf_handler(struct perf_event *event)
+{
+}
+#endif
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -10441,97 +10533,6 @@ static void perf_event_free_filter(struct perf_event *event)
        ftrace_profile_free_filter(event);
 }
 
-#ifdef CONFIG_BPF_SYSCALL
-static void bpf_overflow_handler(struct perf_event *event,
-                                struct perf_sample_data *data,
-                                struct pt_regs *regs)
-{
-       struct bpf_perf_event_data_kern ctx = {
-               .data = data,
-               .event = event,
-       };
-       struct bpf_prog *prog;
-       int ret = 0;
-
-       ctx.regs = perf_arch_bpf_user_pt_regs(regs);
-       if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
-               goto out;
-       rcu_read_lock();
-       prog = READ_ONCE(event->prog);
-       if (prog) {
-               perf_prepare_sample(data, event, regs);
-               ret = bpf_prog_run(prog, &ctx);
-       }
-       rcu_read_unlock();
-out:
-       __this_cpu_dec(bpf_prog_active);
-       if (!ret)
-               return;
-
-       event->orig_overflow_handler(event, data, regs);
-}
-
-static int perf_event_set_bpf_handler(struct perf_event *event,
-                                     struct bpf_prog *prog,
-                                     u64 bpf_cookie)
-{
-       if (event->overflow_handler_context)
-               /* hw breakpoint or kernel counter */
-               return -EINVAL;
-
-       if (event->prog)
-               return -EEXIST;
-
-       if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
-               return -EINVAL;
-
-       if (event->attr.precise_ip &&
-           prog->call_get_stack &&
-           (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
-            event->attr.exclude_callchain_kernel ||
-            event->attr.exclude_callchain_user)) {
-               /*
-                * On perf_event with precise_ip, calling bpf_get_stack()
-                * may trigger unwinder warnings and occasional crashes.
-                * bpf_get_[stack|stackid] works around this issue by using
-                * callchain attached to perf_sample_data. If the
-                * perf_event does not full (kernel and user) callchain
-                * attached to perf_sample_data, do not allow attaching BPF
-                * program that calls bpf_get_[stack|stackid].
-                */
-               return -EPROTO;
-       }
-
-       event->prog = prog;
-       event->bpf_cookie = bpf_cookie;
-       event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
-       WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
-       return 0;
-}
-
-static void perf_event_free_bpf_handler(struct perf_event *event)
-{
-       struct bpf_prog *prog = event->prog;
-
-       if (!prog)
-               return;
-
-       WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
-       event->prog = NULL;
-       bpf_prog_put(prog);
-}
-#else
-static int perf_event_set_bpf_handler(struct perf_event *event,
-                                     struct bpf_prog *prog,
-                                     u64 bpf_cookie)
-{
-       return -EOPNOTSUPP;
-}
-static void perf_event_free_bpf_handler(struct perf_event *event)
-{
-}
-#endif
-
 /*
  * returns true if the event is a tracepoint, or a kprobe/uprobe created
  * with perf_event_open()