1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 #include <linux/namei.h>
27 #include <linux/fileattr.h>
29 #include <net/bpf_sk_storage.h>
31 #include <uapi/linux/bpf.h>
32 #include <uapi/linux/btf.h>
36 #include "trace_probe.h"
39 #define CREATE_TRACE_POINTS
40 #include "bpf_trace.h"
42 #define bpf_event_rcu_dereference(p) \
43 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
45 #define MAX_UPROBE_MULTI_CNT (1U << 20)
46 #define MAX_KPROBE_MULTI_CNT (1U << 20)
49 struct bpf_trace_module {
50 struct module *module;
51 struct list_head list;
54 static LIST_HEAD(bpf_trace_modules);
55 static DEFINE_MUTEX(bpf_module_mutex);
57 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
59 struct bpf_raw_event_map *btp, *ret = NULL;
60 struct bpf_trace_module *btm;
63 mutex_lock(&bpf_module_mutex);
64 list_for_each_entry(btm, &bpf_trace_modules, list) {
65 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
66 btp = &btm->module->bpf_raw_events[i];
67 if (!strcmp(btp->tp->name, name)) {
68 if (try_module_get(btm->module))
75 mutex_unlock(&bpf_module_mutex);
79 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
83 #endif /* CONFIG_MODULES */
85 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
86 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
88 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
89 u64 flags, const struct btf **btf,
91 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
92 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
94 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
95 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
98 * trace_call_bpf - invoke BPF program
99 * @call: tracepoint event
100 * @ctx: opaque context pointer
102 * kprobe handlers execute BPF programs via this helper.
103 * Can be used from static tracepoints in the future.
105 * Return: BPF programs always return an integer which is interpreted by
106 * the kprobe handler as:
107 * 0 - return from kprobe (event is filtered out)
108 * 1 - store kprobe event into ring buffer
109 * Other values are reserved and currently alias to 1
111 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
117 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
119 * since some bpf program is already running on this cpu,
120 * don't call into another bpf program (same or different)
121 * and don't send kprobe event into ring-buffer,
122 * so return zero here
125 bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
132 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
133 * to all call sites, we did a bpf_prog_array_valid() there to check
134 * whether call->prog_array is empty or not, which is
135 * a heuristic to speed up execution.
137 * If the prog_array fetched by bpf_prog_array_valid() was
138 * non-NULL, we go into trace_call_bpf() and do the actual
139 * proper rcu_dereference() under RCU lock.
140 * If it turns out that prog_array is NULL, we bail out.
141 * Conversely, if the pointer fetched by bpf_prog_array_valid()
142 * was NULL, we skip the prog_array with the risk of missing
143 * out on events added in between that check and the
144 * rcu_dereference(), which is an accepted risk.
147 ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
152 __this_cpu_dec(bpf_prog_active);
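/*
 * Usage sketch (illustrative, not part of this file): a classic perf-attached
 * kprobe program feeds the return-value interpretation documented above --
 * returning 0 drops the event, returning 1 lets perf record it. A minimal
 * BPF-side example, assuming libbpf's SEC()/BPF_KPROBE() conventions:
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int BPF_KPROBE(filter_openat)
 *	{
 *		// keep events from root only, filter out everything else
 *		return (__u32)bpf_get_current_uid_gid() == 0;
 *	}
 */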
157 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
158 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
160 regs_set_return_value(regs, rc);
161 override_function_with_return(regs);
165 static const struct bpf_func_proto bpf_override_return_proto = {
166 .func = bpf_override_return,
168 .ret_type = RET_INTEGER,
169 .arg1_type = ARG_PTR_TO_CTX,
170 .arg2_type = ARG_ANYTHING,
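/*
 * Usage sketch (illustrative, not part of this file): from a kprobe program
 * attached to a function on the error-injection opt-in list, the helper
 * forces an early return with the given value:
 *
 *	SEC("kprobe/should_failslab")
 *	int BPF_KPROBE(fail_alloc)
 *	{
 *		bpf_override_return(ctx, -ENOMEM);
 *		return 0;
 *	}
 *
 * This requires CONFIG_BPF_KPROBE_OVERRIDE, and the attach point must be
 * marked error-injectable (see perf_event_attach_bpf_prog() below).
 */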
174 static __always_inline int
175 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
179 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
180 if (unlikely(ret < 0))
181 memset(dst, 0, size);
185 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
186 const void __user *, unsafe_ptr)
188 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
191 const struct bpf_func_proto bpf_probe_read_user_proto = {
192 .func = bpf_probe_read_user,
194 .ret_type = RET_INTEGER,
195 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
196 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
197 .arg3_type = ARG_ANYTHING,
200 static __always_inline int
201 bpf_probe_read_user_str_common(void *dst, u32 size,
202 const void __user *unsafe_ptr)
207 * NB: We rely on strncpy_from_user() not copying junk past the NUL
208 * terminator into `dst`.
210 * strncpy_from_user() does long-sized strides in the fast path. If the
211 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
212 * then there could be junk after the NUL in `dst`. If user takes `dst`
213 * and keys a hash map with it, then semantically identical strings can
214 * occupy multiple entries in the map.
216 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
217 if (unlikely(ret < 0))
218 memset(dst, 0, size);
222 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
223 const void __user *, unsafe_ptr)
225 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
228 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
229 .func = bpf_probe_read_user_str,
231 .ret_type = RET_INTEGER,
232 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
233 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
234 .arg3_type = ARG_ANYTHING,
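/*
 * Usage sketch (illustrative, not part of this file; user_ptr is a
 * hypothetical user-space pointer taken from the program's context):
 *
 *	char buf[256];
 *	long n = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
 *
 *	if (n > 0)
 *		;	// buf holds at most n bytes, including the trailing NUL
 *
 * On error the destination is zeroed by the common helper above, so callers
 * that ignore the return value never see stale data.
 */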
237 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
238 const void *, unsafe_ptr)
240 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
243 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
244 .func = bpf_probe_read_kernel,
246 .ret_type = RET_INTEGER,
247 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
248 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
249 .arg3_type = ARG_ANYTHING,
252 static __always_inline int
253 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
258 * The strncpy_from_kernel_nofault() call will likely not fill the
259 * entire buffer, but that is okay here: we are probing arbitrary
260 * memory anyway, much like bpf_probe_read_*(), and might as well
261 * probe the stack. Thus, memory is explicitly cleared only in the
262 * error case, so that improper users who ignore the return code
263 * altogether don't copy garbage; otherwise the length of the string
264 * is returned and can be used for bpf_perf_event_output() et al.
266 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
267 if (unlikely(ret < 0))
268 memset(dst, 0, size);
272 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
273 const void *, unsafe_ptr)
275 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
278 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
279 .func = bpf_probe_read_kernel_str,
281 .ret_type = RET_INTEGER,
282 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
283 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
284 .arg3_type = ARG_ANYTHING,
287 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
288 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
289 const void *, unsafe_ptr)
291 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
292 return bpf_probe_read_user_common(dst, size,
293 (__force void __user *)unsafe_ptr);
295 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
298 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
299 .func = bpf_probe_read_compat,
301 .ret_type = RET_INTEGER,
302 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
303 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
304 .arg3_type = ARG_ANYTHING,
307 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
308 const void *, unsafe_ptr)
310 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
311 return bpf_probe_read_user_str_common(dst, size,
312 (__force void __user *)unsafe_ptr);
314 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
317 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
318 .func = bpf_probe_read_compat_str,
320 .ret_type = RET_INTEGER,
321 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
322 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
323 .arg3_type = ARG_ANYTHING,
325 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
327 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
331 * Ensure we're in user context which is safe for the helper to
332 * run. This helper has no business in a kthread.
334 * access_ok() should prevent writing to non-user memory, but in
335 * some situations (nommu, temporary switch, etc) access_ok() does
336 * not provide enough validation, hence the check on KERNEL_DS.
338 * nmi_uaccess_okay() ensures the probe is not run in an interim
339 * state, when the task or mm are switched. This is specifically
340 * required to prevent the use of temporary mm.
343 if (unlikely(in_interrupt() ||
344 current->flags & (PF_KTHREAD | PF_EXITING)))
346 if (unlikely(!nmi_uaccess_okay()))
349 return copy_to_user_nofault(unsafe_ptr, src, size);
352 static const struct bpf_func_proto bpf_probe_write_user_proto = {
353 .func = bpf_probe_write_user,
355 .ret_type = RET_INTEGER,
356 .arg1_type = ARG_ANYTHING,
357 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
358 .arg3_type = ARG_CONST_SIZE,
361 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
363 if (!capable(CAP_SYS_ADMIN))
366 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
367 current->comm, task_pid_nr(current));
369 return &bpf_probe_write_user_proto;
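/*
 * Usage sketch (illustrative, not part of this file): bpf_probe_write_user()
 * is deliberately hard to reach -- it requires CAP_SYS_ADMIN, logs the
 * rate-limited warning above at load time, and only works from task context
 * with a live mm. A hedged BPF-side example, where user_ptr and replacement
 * are hypothetical names:
 *
 *	// overwrite a buffer the traced task passed to a syscall
 *	bpf_probe_write_user(user_ptr, &replacement, sizeof(replacement));
 *
 * Intended for experiments and debugging, not for production policy.
 */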
372 #define MAX_TRACE_PRINTK_VARARGS 3
373 #define BPF_TRACE_PRINTK_SIZE 1024
375 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
376 u64, arg2, u64, arg3)
378 u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
379 struct bpf_bprintf_data data = {
380 .get_bin_args = true,
385 ret = bpf_bprintf_prepare(fmt, fmt_size, args,
386 MAX_TRACE_PRINTK_VARARGS, &data);
390 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
392 trace_bpf_trace_printk(data.buf);
394 bpf_bprintf_cleanup(&data);
399 static const struct bpf_func_proto bpf_trace_printk_proto = {
400 .func = bpf_trace_printk,
402 .ret_type = RET_INTEGER,
403 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
404 .arg2_type = ARG_CONST_SIZE,
407 static void __set_printk_clr_event(void)
410 * This program might be calling bpf_trace_printk,
411 * so enable the associated bpf_trace/bpf_trace_printk event.
412 * Repeat this each time as it is possible a user has
413 * disabled bpf_trace_printk events. However, by loading a program
414 * that calls bpf_trace_printk(), the user has expressed the
415 * intent to see such events.
417 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
418 pr_warn_ratelimited("could not enable bpf_trace_printk events");
421 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
423 __set_printk_clr_event();
424 return &bpf_trace_printk_proto;
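/*
 * Usage sketch (illustrative, not part of this file): BPF programs typically
 * reach this helper through libbpf's bpf_printk() macro, which packs up to
 * three arguments:
 *
 *	bpf_printk("pid %d opened fd %d", pid, fd);
 *
 * The output lands in the bpf_trace/bpf_trace_printk event, readable from
 * tracefs (trace_pipe) once the event is enabled by __set_printk_clr_event()
 * above.
 */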
427 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
430 struct bpf_bprintf_data data = {
431 .get_bin_args = true,
436 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
439 num_args = data_len / 8;
441 ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
445 ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
447 trace_bpf_trace_printk(data.buf);
449 bpf_bprintf_cleanup(&data);
454 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
455 .func = bpf_trace_vprintk,
457 .ret_type = RET_INTEGER,
458 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
459 .arg2_type = ARG_CONST_SIZE,
460 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
461 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
464 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
466 __set_printk_clr_event();
467 return &bpf_trace_vprintk_proto;
470 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
471 const void *, args, u32, data_len)
473 struct bpf_bprintf_data data = {
474 .get_bin_args = true,
478 if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
481 num_args = data_len / 8;
483 err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
487 seq_bprintf(m, fmt, data.bin_args);
489 bpf_bprintf_cleanup(&data);
491 return seq_has_overflowed(m) ? -EOVERFLOW : 0;
494 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
496 static const struct bpf_func_proto bpf_seq_printf_proto = {
497 .func = bpf_seq_printf,
499 .ret_type = RET_INTEGER,
500 .arg1_type = ARG_PTR_TO_BTF_ID,
501 .arg1_btf_id = &btf_seq_file_ids[0],
502 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
503 .arg3_type = ARG_CONST_SIZE,
504 .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
505 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
508 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
510 return seq_write(m, data, len) ? -EOVERFLOW : 0;
513 static const struct bpf_func_proto bpf_seq_write_proto = {
514 .func = bpf_seq_write,
516 .ret_type = RET_INTEGER,
517 .arg1_type = ARG_PTR_TO_BTF_ID,
518 .arg1_btf_id = &btf_seq_file_ids[0],
519 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
520 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
523 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
524 u32, btf_ptr_size, u64, flags)
526 const struct btf *btf;
530 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
534 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
537 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
538 .func = bpf_seq_printf_btf,
540 .ret_type = RET_INTEGER,
541 .arg1_type = ARG_PTR_TO_BTF_ID,
542 .arg1_btf_id = &btf_seq_file_ids[0],
543 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
544 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
545 .arg4_type = ARG_ANYTHING,
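/*
 * Usage sketch (illustrative, not part of this file): the seq helpers above
 * are only offered to BPF_TRACE_ITER programs, which receive a seq_file from
 * the iterator infrastructure. Assuming libbpf's SEC("iter/...") and
 * BPF_SEQ_PRINTF() conventions:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 *
 * User space then reads the pinned iterator file or read()s the iter link fd.
 */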
548 static __always_inline int
549 get_map_perf_counter(struct bpf_map *map, u64 flags,
550 u64 *value, u64 *enabled, u64 *running)
552 struct bpf_array *array = container_of(map, struct bpf_array, map);
553 unsigned int cpu = smp_processor_id();
554 u64 index = flags & BPF_F_INDEX_MASK;
555 struct bpf_event_entry *ee;
557 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
559 if (index == BPF_F_CURRENT_CPU)
561 if (unlikely(index >= array->map.max_entries))
564 ee = READ_ONCE(array->ptrs[index]);
568 return perf_event_read_local(ee->event, value, enabled, running);
571 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
576 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
578 * This API is ugly since we miss the [-22..-2] range of valid
579 * counter values, but that's UAPI.
586 static const struct bpf_func_proto bpf_perf_event_read_proto = {
587 .func = bpf_perf_event_read,
589 .ret_type = RET_INTEGER,
590 .arg1_type = ARG_CONST_MAP_PTR,
591 .arg2_type = ARG_ANYTHING,
594 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
595 struct bpf_perf_event_value *, buf, u32, size)
599 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
601 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
607 memset(buf, 0, size);
611 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
612 .func = bpf_perf_event_read_value,
614 .ret_type = RET_INTEGER,
615 .arg1_type = ARG_CONST_MAP_PTR,
616 .arg2_type = ARG_ANYTHING,
617 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
618 .arg4_type = ARG_CONST_SIZE,
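/*
 * Usage sketch (illustrative, not part of this file): compared with
 * bpf_perf_event_read() above, the _value variant keeps the error code
 * separate from the counter and also reports enabled/running times:
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU, &v, sizeof(v)))
 *		total += v.counter;	// v.enabled / v.running allow multiplex scaling
 *
 * where "counters" is a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY populated
 * from user space.
 */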
621 static __always_inline u64
622 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
623 u64 flags, struct perf_sample_data *sd)
625 struct bpf_array *array = container_of(map, struct bpf_array, map);
626 unsigned int cpu = smp_processor_id();
627 u64 index = flags & BPF_F_INDEX_MASK;
628 struct bpf_event_entry *ee;
629 struct perf_event *event;
631 if (index == BPF_F_CURRENT_CPU)
633 if (unlikely(index >= array->map.max_entries))
636 ee = READ_ONCE(array->ptrs[index]);
641 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
642 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
645 if (unlikely(event->oncpu != cpu))
648 return perf_event_output(event, sd, regs);
652 * Support executing tracepoints in normal, irq, and nmi context that each call
653 * bpf_perf_event_output
655 struct bpf_trace_sample_data {
656 struct perf_sample_data sds[3];
659 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
660 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
661 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
662 u64, flags, void *, data, u64, size)
664 struct bpf_trace_sample_data *sds;
665 struct perf_raw_record raw = {
671 struct perf_sample_data *sd;
675 sds = this_cpu_ptr(&bpf_trace_sds);
676 nest_level = this_cpu_inc_return(bpf_trace_nest_level);
678 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
683 sd = &sds->sds[nest_level - 1];
685 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
690 perf_sample_data_init(sd, 0, 0);
691 perf_sample_save_raw_data(sd, &raw);
693 err = __bpf_perf_event_output(regs, map, flags, sd);
695 this_cpu_dec(bpf_trace_nest_level);
700 static const struct bpf_func_proto bpf_perf_event_output_proto = {
701 .func = bpf_perf_event_output,
703 .ret_type = RET_INTEGER,
704 .arg1_type = ARG_PTR_TO_CTX,
705 .arg2_type = ARG_CONST_MAP_PTR,
706 .arg3_type = ARG_ANYTHING,
707 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
708 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
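/*
 * Usage sketch (illustrative, not part of this file): streaming a sample to
 * user space through a perf event array. "events" and struct event are
 * hypothetical names:
 *
 *	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 *
 * BPF_F_CURRENT_CPU selects the per-CPU entry of the BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 * user space drains the samples with perf_buffer__poll() or an equivalent
 * mmap'ed ring reader.
 */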
711 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
712 struct bpf_nested_pt_regs {
713 struct pt_regs regs[3];
715 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
716 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
718 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
719 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
721 struct perf_raw_frag frag = {
726 struct perf_raw_record raw = {
729 .next = ctx_size ? &frag : NULL,
735 struct perf_sample_data *sd;
736 struct pt_regs *regs;
741 nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
743 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
747 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
748 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
750 perf_fetch_caller_regs(regs);
751 perf_sample_data_init(sd, 0, 0);
752 perf_sample_save_raw_data(sd, &raw);
754 ret = __bpf_perf_event_output(regs, map, flags, sd);
756 this_cpu_dec(bpf_event_output_nest_level);
761 BPF_CALL_0(bpf_get_current_task)
763 return (long) current;
766 const struct bpf_func_proto bpf_get_current_task_proto = {
767 .func = bpf_get_current_task,
769 .ret_type = RET_INTEGER,
772 BPF_CALL_0(bpf_get_current_task_btf)
774 return (unsigned long) current;
777 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
778 .func = bpf_get_current_task_btf,
780 .ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
781 .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
784 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
786 return (unsigned long) task_pt_regs(task);
789 BTF_ID_LIST(bpf_task_pt_regs_ids)
790 BTF_ID(struct, pt_regs)
792 const struct bpf_func_proto bpf_task_pt_regs_proto = {
793 .func = bpf_task_pt_regs,
795 .arg1_type = ARG_PTR_TO_BTF_ID,
796 .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
797 .ret_type = RET_PTR_TO_BTF_ID,
798 .ret_btf_id = &bpf_task_pt_regs_ids[0],
801 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
803 struct bpf_array *array = container_of(map, struct bpf_array, map);
806 if (unlikely(idx >= array->map.max_entries))
809 cgrp = READ_ONCE(array->ptrs[idx]);
813 return task_under_cgroup_hierarchy(current, cgrp);
816 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
817 .func = bpf_current_task_under_cgroup,
819 .ret_type = RET_INTEGER,
820 .arg1_type = ARG_CONST_MAP_PTR,
821 .arg2_type = ARG_ANYTHING,
824 struct send_signal_irq_work {
825 struct irq_work irq_work;
826 struct task_struct *task;
831 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
833 static void do_bpf_send_signal(struct irq_work *entry)
835 struct send_signal_irq_work *work;
837 work = container_of(entry, struct send_signal_irq_work, irq_work);
838 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
839 put_task_struct(work->task);
842 static int bpf_send_signal_common(u32 sig, enum pid_type type)
844 struct send_signal_irq_work *work = NULL;
846 /* Similar to bpf_probe_write_user, the task needs to be
847 * in a sound condition and kernel memory access must be
848 * permitted in order to send a signal to the current
849 * task.
851 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
853 if (unlikely(!nmi_uaccess_okay()))
855 /* Task should not be pid=1 to avoid kernel panic. */
856 if (unlikely(is_global_init(current)))
859 if (irqs_disabled()) {
860 /* Do an early check on signal validity. Otherwise,
861 * the error is lost in deferred irq_work.
863 if (unlikely(!valid_signal(sig)))
866 work = this_cpu_ptr(&send_signal_work);
867 if (irq_work_is_busy(&work->irq_work))
870 /* Add the current task, which is the target of the signal,
871 * to the irq_work. The current task may have changed by the
872 * time the queued irq_work is executed.
874 work->task = get_task_struct(current);
877 irq_work_queue(&work->irq_work);
881 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
884 BPF_CALL_1(bpf_send_signal, u32, sig)
886 return bpf_send_signal_common(sig, PIDTYPE_TGID);
889 static const struct bpf_func_proto bpf_send_signal_proto = {
890 .func = bpf_send_signal,
892 .ret_type = RET_INTEGER,
893 .arg1_type = ARG_ANYTHING,
896 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
898 return bpf_send_signal_common(sig, PIDTYPE_PID);
901 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
902 .func = bpf_send_signal_thread,
904 .ret_type = RET_INTEGER,
905 .arg1_type = ARG_ANYTHING,
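/*
 * Note (editorial): bpf_send_signal() targets the whole thread group of the
 * current task (PIDTYPE_TGID), while bpf_send_signal_thread() targets only
 * the calling thread (PIDTYPE_PID). A hedged BPF-side example:
 *
 *	if (should_kill)
 *		bpf_send_signal(SIGKILL);	// delivered to current's process
 *
 * When interrupts are disabled, delivery is deferred to irq_work as
 * implemented in bpf_send_signal_common() above.
 */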
908 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
918 * The path pointer is verified as trusted and safe to use,
919 * but let's double-check it's valid anyway to work around a
920 * potentially broken verifier.
922 len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
926 p = d_path(&copy, buf, sz);
931 memmove(buf, p, len);
937 BTF_SET_START(btf_allowlist_d_path)
938 #ifdef CONFIG_SECURITY
939 BTF_ID(func, security_file_permission)
940 BTF_ID(func, security_inode_getattr)
941 BTF_ID(func, security_file_open)
943 #ifdef CONFIG_SECURITY_PATH
944 BTF_ID(func, security_path_truncate)
946 BTF_ID(func, vfs_truncate)
947 BTF_ID(func, vfs_fallocate)
948 BTF_ID(func, dentry_open)
949 BTF_ID(func, vfs_getattr)
950 BTF_ID(func, filp_close)
951 BTF_SET_END(btf_allowlist_d_path)
953 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
955 if (prog->type == BPF_PROG_TYPE_TRACING &&
956 prog->expected_attach_type == BPF_TRACE_ITER)
959 if (prog->type == BPF_PROG_TYPE_LSM)
960 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
962 return btf_id_set_contains(&btf_allowlist_d_path,
963 prog->aux->attach_btf_id);
966 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
968 static const struct bpf_func_proto bpf_d_path_proto = {
971 .ret_type = RET_INTEGER,
972 .arg1_type = ARG_PTR_TO_BTF_ID,
973 .arg1_btf_id = &bpf_d_path_btf_ids[0],
974 .arg2_type = ARG_PTR_TO_MEM,
975 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
976 .allowed = bpf_d_path_allowed,
979 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
980 BTF_F_PTR_RAW | BTF_F_ZERO)
982 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
983 u64 flags, const struct btf **btf,
986 const struct btf_type *t;
988 if (unlikely(flags & ~(BTF_F_ALL)))
991 if (btf_ptr_size != sizeof(struct btf_ptr))
994 *btf = bpf_get_btf_vmlinux();
996 if (IS_ERR_OR_NULL(*btf))
997 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
999 if (ptr->type_id > 0)
1000 *btf_id = ptr->type_id;
1005 t = btf_type_by_id(*btf, *btf_id);
1006 if (*btf_id <= 0 || !t)
1012 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1013 u32, btf_ptr_size, u64, flags)
1015 const struct btf *btf;
1019 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1023 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1027 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1028 .func = bpf_snprintf_btf,
1030 .ret_type = RET_INTEGER,
1031 .arg1_type = ARG_PTR_TO_MEM,
1032 .arg2_type = ARG_CONST_SIZE,
1033 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1034 .arg4_type = ARG_CONST_SIZE,
1035 .arg5_type = ARG_ANYTHING,
1038 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1040 /* This helper call is inlined by verifier. */
1041 return ((u64 *)ctx)[-2];
1044 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1045 .func = bpf_get_func_ip_tracing,
1047 .ret_type = RET_INTEGER,
1048 .arg1_type = ARG_PTR_TO_CTX,
1051 #ifdef CONFIG_X86_KERNEL_IBT
1052 static unsigned long get_entry_ip(unsigned long fentry_ip)
1056 /* We want to be extra safe in case entry ip is on the page edge,
1057 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
1059 if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
1060 if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
1063 instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
1065 if (is_endbr(instr))
1066 fentry_ip -= ENDBR_INSN_SIZE;
1070 #define get_entry_ip(fentry_ip) fentry_ip
1073 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1075 struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1078 #ifdef CONFIG_UPROBES
1079 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1080 if (run_ctx->is_uprobe)
1081 return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1084 kp = kprobe_running();
1086 if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1089 return get_entry_ip((uintptr_t)kp->addr);
1092 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1093 .func = bpf_get_func_ip_kprobe,
1095 .ret_type = RET_INTEGER,
1096 .arg1_type = ARG_PTR_TO_CTX,
1099 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1101 return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1104 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1105 .func = bpf_get_func_ip_kprobe_multi,
1107 .ret_type = RET_INTEGER,
1108 .arg1_type = ARG_PTR_TO_CTX,
1111 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1113 return bpf_kprobe_multi_cookie(current->bpf_ctx);
1116 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1117 .func = bpf_get_attach_cookie_kprobe_multi,
1119 .ret_type = RET_INTEGER,
1120 .arg1_type = ARG_PTR_TO_CTX,
1123 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1125 return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1128 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1129 .func = bpf_get_func_ip_uprobe_multi,
1131 .ret_type = RET_INTEGER,
1132 .arg1_type = ARG_PTR_TO_CTX,
1135 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1137 return bpf_uprobe_multi_cookie(current->bpf_ctx);
1140 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1141 .func = bpf_get_attach_cookie_uprobe_multi,
1143 .ret_type = RET_INTEGER,
1144 .arg1_type = ARG_PTR_TO_CTX,
1147 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1149 struct bpf_trace_run_ctx *run_ctx;
1151 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1152 return run_ctx->bpf_cookie;
1155 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1156 .func = bpf_get_attach_cookie_trace,
1158 .ret_type = RET_INTEGER,
1159 .arg1_type = ARG_PTR_TO_CTX,
1162 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1164 return ctx->event->bpf_cookie;
1167 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1168 .func = bpf_get_attach_cookie_pe,
1170 .ret_type = RET_INTEGER,
1171 .arg1_type = ARG_PTR_TO_CTX,
1174 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1176 struct bpf_trace_run_ctx *run_ctx;
1178 run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1179 return run_ctx->bpf_cookie;
1182 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1183 .func = bpf_get_attach_cookie_tracing,
1185 .ret_type = RET_INTEGER,
1186 .arg1_type = ARG_PTR_TO_CTX,
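/*
 * Usage sketch (illustrative, not part of this file): the cookie returned by
 * the helpers above is the u64 value user space supplied at attach time, for
 * example through bpf_link_create() opts (kprobe_multi cookies, perf_event
 * bpf_cookie, tracing cookie). One program attached to many symbols can use
 * it to tell the attach points apart:
 *
 *	SEC("kprobe.multi/tcp_*")
 *	int BPF_KPROBE(probe)
 *	{
 *		__u64 id = bpf_get_attach_cookie(ctx);
 *
 *		return do_something(id);	// do_something() is hypothetical
 *	}
 */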
1189 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1191 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1192 u32 entry_cnt = size / br_entry_size;
1194 entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1196 if (unlikely(flags))
1202 return entry_cnt * br_entry_size;
1205 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1206 .func = bpf_get_branch_snapshot,
1208 .ret_type = RET_INTEGER,
1209 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1210 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1213 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1215 /* This helper call is inlined by verifier. */
1216 u64 nr_args = ((u64 *)ctx)[-1];
1218 if ((u64) n >= nr_args)
1220 *value = ((u64 *)ctx)[n];
1224 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1225 .func = get_func_arg,
1226 .ret_type = RET_INTEGER,
1227 .arg1_type = ARG_PTR_TO_CTX,
1228 .arg2_type = ARG_ANYTHING,
1229 .arg3_type = ARG_PTR_TO_LONG,
1232 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1234 /* This helper call is inlined by verifier. */
1235 u64 nr_args = ((u64 *)ctx)[-1];
1237 *value = ((u64 *)ctx)[nr_args];
1241 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1242 .func = get_func_ret,
1243 .ret_type = RET_INTEGER,
1244 .arg1_type = ARG_PTR_TO_CTX,
1245 .arg2_type = ARG_PTR_TO_LONG,
1248 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1250 /* This helper call is inlined by verifier. */
1251 return ((u64 *)ctx)[-1];
1254 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1255 .func = get_func_arg_cnt,
1256 .ret_type = RET_INTEGER,
1257 .arg1_type = ARG_PTR_TO_CTX,
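/*
 * Layout note (editorial): for trampoline-based (fentry/fexit/fmod_ret)
 * programs the context is an array of u64 slots, which is what the three
 * helpers above index into:
 *
 *	ctx[0] .. ctx[nr_args - 1]	function arguments
 *	ctx[nr_args]			return value (valid for fexit)
 *	ctx[-1]				nr_args
 *	ctx[-2]				traced function's IP
 *
 * The verifier inlines these helpers, so the indexing never becomes a real
 * function call at run time.
 */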
1261 __bpf_kfunc_start_defs();
1264 * bpf_lookup_user_key - lookup a key by its serial
1265 * @serial: key handle serial number
1266 * @flags: lookup-specific flags
1268 * Search a key with a given *serial* and the provided *flags*.
1269 * If found, increment the reference count of the key by one, and
1270 * return it in the bpf_key structure.
1272 * The bpf_key structure must be passed to bpf_key_put() when done
1273 * with it, so that the key reference count is decremented and the
1274 * bpf_key structure is freed.
1276 * Permission checks are deferred to the time the key is used by
1277 * one of the available key-specific kfuncs.
1279 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1280 * special keyring (e.g. session keyring), if it doesn't yet exist.
1281 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1282 * for the key construction, and to retrieve uninstantiated keys (keys
1283 * without data attached to them).
1285 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1286 * NULL pointer otherwise.
1288 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1291 struct bpf_key *bkey;
1293 if (flags & ~KEY_LOOKUP_ALL)
1297 * Permission check is deferred until the key is used, as the
1298 * intent of the caller is unknown here.
1300 key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1301 if (IS_ERR(key_ref))
1304 bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1306 key_put(key_ref_to_ptr(key_ref));
1310 bkey->key = key_ref_to_ptr(key_ref);
1311 bkey->has_ref = true;
1317 * bpf_lookup_system_key - lookup a key by a system-defined ID
1320 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1321 * The key pointer is marked as invalid, to prevent bpf_key_put() from
1322 * attempting to decrement the key reference count on that pointer. The key
1323 * pointer set in such way is currently understood only by
1324 * verify_pkcs7_signature().
1326 * Set *id* to one of the values defined in include/linux/verification.h:
1327 * 0 for the primary keyring (immutable keyring of system keys);
1328 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1329 * (where keys can be added only if they are vouched for by existing keys
1330 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1331 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1332 * kernel image and, possibly, the initramfs signature).
1334 * Return: a bpf_key pointer with an invalid key pointer set from the
1335 * pre-determined ID on success, a NULL pointer otherwise
1337 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1339 struct bpf_key *bkey;
1341 if (system_keyring_id_check(id) < 0)
1344 bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1348 bkey->key = (struct key *)(unsigned long)id;
1349 bkey->has_ref = false;
1355 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1356 * @bkey: bpf_key structure
1358 * Decrement the reference count of the key inside *bkey*, if the pointer
1359 * is valid, and free *bkey*.
1361 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1369 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1371 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1372 * @data_ptr: data to verify
1373 * @sig_ptr: signature of the data
1374 * @trusted_keyring: keyring with keys trusted for signature verification
1376 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1377 * with keys in a keyring referenced by *trusted_keyring*.
1379 * Return: 0 on success, a negative value on error.
1381 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1382 struct bpf_dynptr_kern *sig_ptr,
1383 struct bpf_key *trusted_keyring)
1385 const void *data, *sig;
1386 u32 data_len, sig_len;
1389 if (trusted_keyring->has_ref) {
1391 * Do the permission check deferred in bpf_lookup_user_key().
1392 * See bpf_lookup_user_key() for more details.
1394 * A call to key_task_permission() here would be redundant, as
1395 * it is already done by keyring_search() called by
1396 * find_asymmetric_key().
1398 ret = key_validate(trusted_keyring->key);
1403 data_len = __bpf_dynptr_size(data_ptr);
1404 data = __bpf_dynptr_data(data_ptr, data_len);
1405 sig_len = __bpf_dynptr_size(sig_ptr);
1406 sig = __bpf_dynptr_data(sig_ptr, sig_len);
1408 return verify_pkcs7_signature(data, data_len, sig, sig_len,
1409 trusted_keyring->key,
1410 VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1413 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
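/*
 * Usage sketch (illustrative, not part of this file): a sleepable LSM or
 * tracing program verifying a detached PKCS#7 signature against the
 * secondary system keyring. data_ptr/sig_ptr are dynptrs the program set up
 * earlier (e.g. with bpf_dynptr_from_mem()):
 *
 *	struct bpf_key *kr = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *
 *	if (!kr)
 *		return -ENOENT;
 *	ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *	bpf_key_put(kr);
 */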
1415 __bpf_kfunc_end_defs();
1417 BTF_KFUNCS_START(key_sig_kfunc_set)
1418 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1419 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1420 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1421 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1422 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1424 BTF_KFUNCS_END(key_sig_kfunc_set)
1426 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1427 .owner = THIS_MODULE,
1428 .set = &key_sig_kfunc_set,
1431 static int __init bpf_key_sig_kfuncs_init(void)
1433 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1434 &bpf_key_sig_kfunc_set);
1437 late_initcall(bpf_key_sig_kfuncs_init);
1438 #endif /* CONFIG_KEYS */
1440 /* filesystem kfuncs */
1441 __bpf_kfunc_start_defs();
1444 * bpf_get_file_xattr - get xattr of a file
1445 * @file: file to get xattr from
1446 * @name__str: name of the xattr
1447 * @value_ptr: output buffer of the xattr value
1449 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
1451 * For security reasons, only *name__str* with prefix "user." is allowed.
1453 * Return: 0 on success, a negative value on error.
1455 __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
1456 struct bpf_dynptr_kern *value_ptr)
1458 struct dentry *dentry;
1463 if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
1466 value_len = __bpf_dynptr_size(value_ptr);
1467 value = __bpf_dynptr_data_rw(value_ptr, value_len);
1471 dentry = file_dentry(file);
1472 ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
1475 return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
1478 __bpf_kfunc_end_defs();
1480 BTF_KFUNCS_START(fs_kfunc_set_ids)
1481 BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
1482 BTF_KFUNCS_END(fs_kfunc_set_ids)
1484 static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
1486 if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
1489 /* Only allow attaching from LSM hooks, to avoid recursion */
1490 return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
1493 static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
1494 .owner = THIS_MODULE,
1495 .set = &fs_kfunc_set_ids,
1496 .filter = bpf_get_file_xattr_filter,
1499 static int __init bpf_fs_kfuncs_init(void)
1501 return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
1504 late_initcall(bpf_fs_kfuncs_init);
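/*
 * Usage sketch (illustrative, not part of this file): per the filter above,
 * only LSM programs may call bpf_get_file_xattr(), and only "user." xattrs
 * are readable. "user.sig" is a hypothetical attribute name:
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_open, struct file *file)
 *	{
 *		struct bpf_dynptr value_ptr;
 *		char buf[64];
 *
 *		bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value_ptr);
 *		if (bpf_get_file_xattr(file, "user.sig", &value_ptr) < 0)
 *			return 0;
 *		return check_sig(buf);	// check_sig() is hypothetical
 *	}
 */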
1506 static const struct bpf_func_proto *
1507 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1510 case BPF_FUNC_map_lookup_elem:
1511 return &bpf_map_lookup_elem_proto;
1512 case BPF_FUNC_map_update_elem:
1513 return &bpf_map_update_elem_proto;
1514 case BPF_FUNC_map_delete_elem:
1515 return &bpf_map_delete_elem_proto;
1516 case BPF_FUNC_map_push_elem:
1517 return &bpf_map_push_elem_proto;
1518 case BPF_FUNC_map_pop_elem:
1519 return &bpf_map_pop_elem_proto;
1520 case BPF_FUNC_map_peek_elem:
1521 return &bpf_map_peek_elem_proto;
1522 case BPF_FUNC_map_lookup_percpu_elem:
1523 return &bpf_map_lookup_percpu_elem_proto;
1524 case BPF_FUNC_ktime_get_ns:
1525 return &bpf_ktime_get_ns_proto;
1526 case BPF_FUNC_ktime_get_boot_ns:
1527 return &bpf_ktime_get_boot_ns_proto;
1528 case BPF_FUNC_tail_call:
1529 return &bpf_tail_call_proto;
1530 case BPF_FUNC_get_current_task:
1531 return &bpf_get_current_task_proto;
1532 case BPF_FUNC_get_current_task_btf:
1533 return &bpf_get_current_task_btf_proto;
1534 case BPF_FUNC_task_pt_regs:
1535 return &bpf_task_pt_regs_proto;
1536 case BPF_FUNC_get_current_uid_gid:
1537 return &bpf_get_current_uid_gid_proto;
1538 case BPF_FUNC_get_current_comm:
1539 return &bpf_get_current_comm_proto;
1540 case BPF_FUNC_trace_printk:
1541 return bpf_get_trace_printk_proto();
1542 case BPF_FUNC_get_smp_processor_id:
1543 return &bpf_get_smp_processor_id_proto;
1544 case BPF_FUNC_get_numa_node_id:
1545 return &bpf_get_numa_node_id_proto;
1546 case BPF_FUNC_perf_event_read:
1547 return &bpf_perf_event_read_proto;
1548 case BPF_FUNC_current_task_under_cgroup:
1549 return &bpf_current_task_under_cgroup_proto;
1550 case BPF_FUNC_get_prandom_u32:
1551 return &bpf_get_prandom_u32_proto;
1552 case BPF_FUNC_probe_write_user:
1553 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1554 NULL : bpf_get_probe_write_proto();
1555 case BPF_FUNC_probe_read_user:
1556 return &bpf_probe_read_user_proto;
1557 case BPF_FUNC_probe_read_kernel:
1558 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1559 NULL : &bpf_probe_read_kernel_proto;
1560 case BPF_FUNC_probe_read_user_str:
1561 return &bpf_probe_read_user_str_proto;
1562 case BPF_FUNC_probe_read_kernel_str:
1563 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1564 NULL : &bpf_probe_read_kernel_str_proto;
1565 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1566 case BPF_FUNC_probe_read:
1567 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1568 NULL : &bpf_probe_read_compat_proto;
1569 case BPF_FUNC_probe_read_str:
1570 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1571 NULL : &bpf_probe_read_compat_str_proto;
1573 #ifdef CONFIG_CGROUPS
1574 case BPF_FUNC_cgrp_storage_get:
1575 return &bpf_cgrp_storage_get_proto;
1576 case BPF_FUNC_cgrp_storage_delete:
1577 return &bpf_cgrp_storage_delete_proto;
1579 case BPF_FUNC_send_signal:
1580 return &bpf_send_signal_proto;
1581 case BPF_FUNC_send_signal_thread:
1582 return &bpf_send_signal_thread_proto;
1583 case BPF_FUNC_perf_event_read_value:
1584 return &bpf_perf_event_read_value_proto;
1585 case BPF_FUNC_ringbuf_output:
1586 return &bpf_ringbuf_output_proto;
1587 case BPF_FUNC_ringbuf_reserve:
1588 return &bpf_ringbuf_reserve_proto;
1589 case BPF_FUNC_ringbuf_submit:
1590 return &bpf_ringbuf_submit_proto;
1591 case BPF_FUNC_ringbuf_discard:
1592 return &bpf_ringbuf_discard_proto;
1593 case BPF_FUNC_ringbuf_query:
1594 return &bpf_ringbuf_query_proto;
1595 case BPF_FUNC_jiffies64:
1596 return &bpf_jiffies64_proto;
1597 case BPF_FUNC_get_task_stack:
1598 return &bpf_get_task_stack_proto;
1599 case BPF_FUNC_copy_from_user:
1600 return &bpf_copy_from_user_proto;
1601 case BPF_FUNC_copy_from_user_task:
1602 return &bpf_copy_from_user_task_proto;
1603 case BPF_FUNC_snprintf_btf:
1604 return &bpf_snprintf_btf_proto;
1605 case BPF_FUNC_per_cpu_ptr:
1606 return &bpf_per_cpu_ptr_proto;
1607 case BPF_FUNC_this_cpu_ptr:
1608 return &bpf_this_cpu_ptr_proto;
1609 case BPF_FUNC_task_storage_get:
1610 if (bpf_prog_check_recur(prog))
1611 return &bpf_task_storage_get_recur_proto;
1612 return &bpf_task_storage_get_proto;
1613 case BPF_FUNC_task_storage_delete:
1614 if (bpf_prog_check_recur(prog))
1615 return &bpf_task_storage_delete_recur_proto;
1616 return &bpf_task_storage_delete_proto;
1617 case BPF_FUNC_for_each_map_elem:
1618 return &bpf_for_each_map_elem_proto;
1619 case BPF_FUNC_snprintf:
1620 return &bpf_snprintf_proto;
1621 case BPF_FUNC_get_func_ip:
1622 return &bpf_get_func_ip_proto_tracing;
1623 case BPF_FUNC_get_branch_snapshot:
1624 return &bpf_get_branch_snapshot_proto;
1625 case BPF_FUNC_find_vma:
1626 return &bpf_find_vma_proto;
1627 case BPF_FUNC_trace_vprintk:
1628 return bpf_get_trace_vprintk_proto();
1630 return bpf_base_func_proto(func_id, prog);
1634 static bool is_kprobe_multi(const struct bpf_prog *prog)
1636 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
1637 prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1640 static inline bool is_kprobe_session(const struct bpf_prog *prog)
1642 return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
1645 static const struct bpf_func_proto *
1646 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1649 case BPF_FUNC_perf_event_output:
1650 return &bpf_perf_event_output_proto;
1651 case BPF_FUNC_get_stackid:
1652 return &bpf_get_stackid_proto;
1653 case BPF_FUNC_get_stack:
1654 return &bpf_get_stack_proto;
1655 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1656 case BPF_FUNC_override_return:
1657 return &bpf_override_return_proto;
1659 case BPF_FUNC_get_func_ip:
1660 if (is_kprobe_multi(prog))
1661 return &bpf_get_func_ip_proto_kprobe_multi;
1662 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1663 return &bpf_get_func_ip_proto_uprobe_multi;
1664 return &bpf_get_func_ip_proto_kprobe;
1665 case BPF_FUNC_get_attach_cookie:
1666 if (is_kprobe_multi(prog))
1667 return &bpf_get_attach_cookie_proto_kmulti;
1668 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1669 return &bpf_get_attach_cookie_proto_umulti;
1670 return &bpf_get_attach_cookie_proto_trace;
1672 return bpf_tracing_func_proto(func_id, prog);
1676 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1677 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1678 const struct bpf_prog *prog,
1679 struct bpf_insn_access_aux *info)
1681 if (off < 0 || off >= sizeof(struct pt_regs))
1683 if (type != BPF_READ)
1685 if (off % size != 0)
1688 * Assertion for 32 bit to make sure last 8 byte access
1689 * (BPF_DW) to the last 4 byte member is disallowed.
1691 if (off + size > sizeof(struct pt_regs))
1697 const struct bpf_verifier_ops kprobe_verifier_ops = {
1698 .get_func_proto = kprobe_prog_func_proto,
1699 .is_valid_access = kprobe_prog_is_valid_access,
1702 const struct bpf_prog_ops kprobe_prog_ops = {
1705 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1706 u64, flags, void *, data, u64, size)
1708 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1711 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1712 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1713 * from there and call the same bpf_perf_event_output() helper inline.
1715 return ____bpf_perf_event_output(regs, map, flags, data, size);
1718 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1719 .func = bpf_perf_event_output_tp,
1721 .ret_type = RET_INTEGER,
1722 .arg1_type = ARG_PTR_TO_CTX,
1723 .arg2_type = ARG_CONST_MAP_PTR,
1724 .arg3_type = ARG_ANYTHING,
1725 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1726 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1729 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1732 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1735 * Same comment as in bpf_perf_event_output_tp(), only that this time
1736 * the other helper's function body cannot be inlined due to being
1737 * external, thus we need to call raw helper function.
1739 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1743 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1744 .func = bpf_get_stackid_tp,
1746 .ret_type = RET_INTEGER,
1747 .arg1_type = ARG_PTR_TO_CTX,
1748 .arg2_type = ARG_CONST_MAP_PTR,
1749 .arg3_type = ARG_ANYTHING,
1752 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1755 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1757 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1758 (unsigned long) size, flags, 0);
1761 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1762 .func = bpf_get_stack_tp,
1764 .ret_type = RET_INTEGER,
1765 .arg1_type = ARG_PTR_TO_CTX,
1766 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1767 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1768 .arg4_type = ARG_ANYTHING,
1771 static const struct bpf_func_proto *
1772 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1775 case BPF_FUNC_perf_event_output:
1776 return &bpf_perf_event_output_proto_tp;
1777 case BPF_FUNC_get_stackid:
1778 return &bpf_get_stackid_proto_tp;
1779 case BPF_FUNC_get_stack:
1780 return &bpf_get_stack_proto_tp;
1781 case BPF_FUNC_get_attach_cookie:
1782 return &bpf_get_attach_cookie_proto_trace;
1784 return bpf_tracing_func_proto(func_id, prog);
1788 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1789 const struct bpf_prog *prog,
1790 struct bpf_insn_access_aux *info)
1792 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1794 if (type != BPF_READ)
1796 if (off % size != 0)
1799 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1803 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1804 .get_func_proto = tp_prog_func_proto,
1805 .is_valid_access = tp_prog_is_valid_access,
1808 const struct bpf_prog_ops tracepoint_prog_ops = {
1811 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1812 struct bpf_perf_event_value *, buf, u32, size)
1816 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1818 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1824 memset(buf, 0, size);
1828 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1829 .func = bpf_perf_prog_read_value,
1831 .ret_type = RET_INTEGER,
1832 .arg1_type = ARG_PTR_TO_CTX,
1833 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1834 .arg3_type = ARG_CONST_SIZE,
1837 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1838 void *, buf, u32, size, u64, flags)
1840 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1841 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1844 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1847 if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1850 if (unlikely(!br_stack))
1853 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1854 return br_stack->nr * br_entry_size;
1856 if (!buf || (size % br_entry_size != 0))
1859 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1860 memcpy(buf, br_stack->entries, to_copy);
1865 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1866 .func = bpf_read_branch_records,
1868 .ret_type = RET_INTEGER,
1869 .arg1_type = ARG_PTR_TO_CTX,
1870 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1871 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1872 .arg4_type = ARG_ANYTHING,
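/*
 * Usage sketch (illustrative, not part of this file): the helper supports a
 * two-step protocol -- query the size first, then read the entries.
 * MAX_ENTRIES is a hypothetical constant:
 *
 *	struct perf_branch_entry entries[MAX_ENTRIES];
 *	int sz = bpf_read_branch_records(ctx, NULL, 0, BPF_F_GET_BRANCH_RECORDS_SIZE);
 *
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 *
 * The second call copies at most min(size, available) bytes and returns the
 * number of bytes written.
 */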
1875 static const struct bpf_func_proto *
1876 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1879 case BPF_FUNC_perf_event_output:
1880 return &bpf_perf_event_output_proto_tp;
1881 case BPF_FUNC_get_stackid:
1882 return &bpf_get_stackid_proto_pe;
1883 case BPF_FUNC_get_stack:
1884 return &bpf_get_stack_proto_pe;
1885 case BPF_FUNC_perf_prog_read_value:
1886 return &bpf_perf_prog_read_value_proto;
1887 case BPF_FUNC_read_branch_records:
1888 return &bpf_read_branch_records_proto;
1889 case BPF_FUNC_get_attach_cookie:
1890 return &bpf_get_attach_cookie_proto_pe;
1892 return bpf_tracing_func_proto(func_id, prog);
1897 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1898 * to avoid potential recursive reuse issue when/if tracepoints are added
1899 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1901 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1902 * in normal, irq, and nmi context.
1904 struct bpf_raw_tp_regs {
1905 struct pt_regs regs[3];
1907 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1908 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1909 static struct pt_regs *get_bpf_raw_tp_regs(void)
1911 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1912 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1914 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1915 this_cpu_dec(bpf_raw_tp_nest_level);
1916 return ERR_PTR(-EBUSY);
1919 return &tp_regs->regs[nest_level - 1];
1922 static void put_bpf_raw_tp_regs(void)
1924 this_cpu_dec(bpf_raw_tp_nest_level);
1927 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1928 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1930 struct pt_regs *regs = get_bpf_raw_tp_regs();
1934 return PTR_ERR(regs);
1936 perf_fetch_caller_regs(regs);
1937 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1939 put_bpf_raw_tp_regs();
1943 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1944 .func = bpf_perf_event_output_raw_tp,
1946 .ret_type = RET_INTEGER,
1947 .arg1_type = ARG_PTR_TO_CTX,
1948 .arg2_type = ARG_CONST_MAP_PTR,
1949 .arg3_type = ARG_ANYTHING,
1950 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1951 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1954 extern const struct bpf_func_proto bpf_skb_output_proto;
1955 extern const struct bpf_func_proto bpf_xdp_output_proto;
1956 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1958 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1959 struct bpf_map *, map, u64, flags)
1961 struct pt_regs *regs = get_bpf_raw_tp_regs();
1965 return PTR_ERR(regs);
1967 perf_fetch_caller_regs(regs);
1968 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1969 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1971 put_bpf_raw_tp_regs();
1975 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1976 .func = bpf_get_stackid_raw_tp,
1978 .ret_type = RET_INTEGER,
1979 .arg1_type = ARG_PTR_TO_CTX,
1980 .arg2_type = ARG_CONST_MAP_PTR,
1981 .arg3_type = ARG_ANYTHING,
1984 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1985 void *, buf, u32, size, u64, flags)
1987 struct pt_regs *regs = get_bpf_raw_tp_regs();
1991 return PTR_ERR(regs);
1993 perf_fetch_caller_regs(regs);
1994 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1995 (unsigned long) size, flags, 0);
1996 put_bpf_raw_tp_regs();
2000 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
2001 .func = bpf_get_stack_raw_tp,
2003 .ret_type = RET_INTEGER,
2004 .arg1_type = ARG_PTR_TO_CTX,
2005 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
2006 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
2007 .arg4_type = ARG_ANYTHING,
2010 static const struct bpf_func_proto *
2011 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2014 case BPF_FUNC_perf_event_output:
2015 return &bpf_perf_event_output_proto_raw_tp;
2016 case BPF_FUNC_get_stackid:
2017 return &bpf_get_stackid_proto_raw_tp;
2018 case BPF_FUNC_get_stack:
2019 return &bpf_get_stack_proto_raw_tp;
2020 case BPF_FUNC_get_attach_cookie:
2021 return &bpf_get_attach_cookie_proto_tracing;
2023 return bpf_tracing_func_proto(func_id, prog);
2027 const struct bpf_func_proto *
2028 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2030 const struct bpf_func_proto *fn;
2034 case BPF_FUNC_skb_output:
2035 return &bpf_skb_output_proto;
2036 case BPF_FUNC_xdp_output:
2037 return &bpf_xdp_output_proto;
2038 case BPF_FUNC_skc_to_tcp6_sock:
2039 return &bpf_skc_to_tcp6_sock_proto;
2040 case BPF_FUNC_skc_to_tcp_sock:
2041 return &bpf_skc_to_tcp_sock_proto;
2042 case BPF_FUNC_skc_to_tcp_timewait_sock:
2043 return &bpf_skc_to_tcp_timewait_sock_proto;
2044 case BPF_FUNC_skc_to_tcp_request_sock:
2045 return &bpf_skc_to_tcp_request_sock_proto;
2046 case BPF_FUNC_skc_to_udp6_sock:
2047 return &bpf_skc_to_udp6_sock_proto;
2048 case BPF_FUNC_skc_to_unix_sock:
2049 return &bpf_skc_to_unix_sock_proto;
2050 case BPF_FUNC_skc_to_mptcp_sock:
2051 return &bpf_skc_to_mptcp_sock_proto;
2052 case BPF_FUNC_sk_storage_get:
2053 return &bpf_sk_storage_get_tracing_proto;
2054 case BPF_FUNC_sk_storage_delete:
2055 return &bpf_sk_storage_delete_tracing_proto;
2056 case BPF_FUNC_sock_from_file:
2057 return &bpf_sock_from_file_proto;
2058 case BPF_FUNC_get_socket_cookie:
2059 return &bpf_get_socket_ptr_cookie_proto;
2060 case BPF_FUNC_xdp_get_buff_len:
2061 return &bpf_xdp_get_buff_len_trace_proto;
2063 case BPF_FUNC_seq_printf:
2064 return prog->expected_attach_type == BPF_TRACE_ITER ?
2065 &bpf_seq_printf_proto :
2067 case BPF_FUNC_seq_write:
2068 return prog->expected_attach_type == BPF_TRACE_ITER ?
2069 &bpf_seq_write_proto :
2071 case BPF_FUNC_seq_printf_btf:
2072 return prog->expected_attach_type == BPF_TRACE_ITER ?
2073 &bpf_seq_printf_btf_proto :
2075 case BPF_FUNC_d_path:
2076 return &bpf_d_path_proto;
2077 case BPF_FUNC_get_func_arg:
2078 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
2079 case BPF_FUNC_get_func_ret:
2080 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2081 case BPF_FUNC_get_func_arg_cnt:
2082 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2083 case BPF_FUNC_get_attach_cookie:
2084 if (prog->type == BPF_PROG_TYPE_TRACING &&
2085 prog->expected_attach_type == BPF_TRACE_RAW_TP)
2086 return &bpf_get_attach_cookie_proto_tracing;
2087 return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2089 fn = raw_tp_prog_func_proto(func_id, prog);
2090 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2091 fn = bpf_iter_get_func_proto(func_id, prog);
2096 static bool raw_tp_prog_is_valid_access(int off, int size,
2097 enum bpf_access_type type,
2098 const struct bpf_prog *prog,
2099 struct bpf_insn_access_aux *info)
2101 return bpf_tracing_ctx_access(off, size, type);
2104 static bool tracing_prog_is_valid_access(int off, int size,
2105 enum bpf_access_type type,
2106 const struct bpf_prog *prog,
2107 struct bpf_insn_access_aux *info)
2109 return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2112 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2113 const union bpf_attr *kattr,
2114 union bpf_attr __user *uattr)
2119 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2120 .get_func_proto = raw_tp_prog_func_proto,
2121 .is_valid_access = raw_tp_prog_is_valid_access,
2124 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2126 .test_run = bpf_prog_test_run_raw_tp,
2130 const struct bpf_verifier_ops tracing_verifier_ops = {
2131 .get_func_proto = tracing_prog_func_proto,
2132 .is_valid_access = tracing_prog_is_valid_access,
2135 const struct bpf_prog_ops tracing_prog_ops = {
2136 .test_run = bpf_prog_test_run_tracing,
2139 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2140 enum bpf_access_type type,
2141 const struct bpf_prog *prog,
2142 struct bpf_insn_access_aux *info)
2145 if (size != sizeof(u64) || type != BPF_READ)
2147 info->reg_type = PTR_TO_TP_BUFFER;
2149 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
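/*
 * For writable raw tracepoints the first context argument (offset 0) is a
 * pointer into the tracepoint's writable buffer: it must be read as a full
 * u64 and is tagged PTR_TO_TP_BUFFER so the verifier can bound any writes
 * through it by the tracepoint's writable_size (checked against
 * prog->aux->max_tp_access in bpf_probe_register() below).
 */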
2152 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2153 .get_func_proto = raw_tp_prog_func_proto,
2154 .is_valid_access = raw_tp_writable_prog_is_valid_access,
2157 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2160 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2161 const struct bpf_prog *prog,
2162 struct bpf_insn_access_aux *info)
2164 const int size_u64 = sizeof(u64);
2166 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2168 if (type != BPF_READ)
2170 if (off % size != 0) {
2171 if (sizeof(unsigned long) != 4)
2175 if (off % size != 4)
2180 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2181 bpf_ctx_record_field_size(info, size_u64);
2182 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2185 case bpf_ctx_range(struct bpf_perf_event_data, addr):
2186 bpf_ctx_record_field_size(info, size_u64);
2187 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2191 if (size != sizeof(long))
2198 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2199 const struct bpf_insn *si,
2200 struct bpf_insn *insn_buf,
2201 struct bpf_prog *prog, u32 *target_size)
2203 struct bpf_insn *insn = insn_buf;
2206 case offsetof(struct bpf_perf_event_data, sample_period):
2207 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2208 data), si->dst_reg, si->src_reg,
2209 offsetof(struct bpf_perf_event_data_kern, data));
2210 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2211 bpf_target_off(struct perf_sample_data, period, 8,
2214 case offsetof(struct bpf_perf_event_data, addr):
2215 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2216 data), si->dst_reg, si->src_reg,
2217 offsetof(struct bpf_perf_event_data_kern, data));
2218 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2219 bpf_target_off(struct perf_sample_data, addr, 8,
2223 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2224 regs), si->dst_reg, si->src_reg,
2225 offsetof(struct bpf_perf_event_data_kern, regs));
2226 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2231 return insn - insn_buf;
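/*
 * The rewrites above translate field accesses on the UAPI struct
 * bpf_perf_event_data into loads through the kernel-internal struct
 * bpf_perf_event_data_kern, roughly:
 *
 *	ctx->sample_period	=>	ctx_kern->data->period
 *	ctx->addr		=>	ctx_kern->data->addr
 *	any other offset	=>	load through ctx_kern->regs
 *
 * so perf event programs see a stable context layout regardless of how the
 * kernel lays out its internal sample data.
 */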
2234 const struct bpf_verifier_ops perf_event_verifier_ops = {
2235 .get_func_proto = pe_prog_func_proto,
2236 .is_valid_access = pe_prog_is_valid_access,
2237 .convert_ctx_access = pe_prog_convert_ctx_access,
2240 const struct bpf_prog_ops perf_event_prog_ops = {
2243 static DEFINE_MUTEX(bpf_event_mutex);
2245 #define BPF_TRACE_MAX_PROGS 64
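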
2247 int perf_event_attach_bpf_prog(struct perf_event *event,
2248 struct bpf_prog *prog,
2251 struct bpf_prog_array *old_array;
2252 struct bpf_prog_array *new_array;
2256	 * Kprobe override only works if the kprobe is placed on the function
2257	 * entry, and only if the function is on the error-injection opt-in list.
2259 if (prog->kprobe_override &&
2260 (!trace_kprobe_on_func_entry(event->tp_event) ||
2261 !trace_kprobe_error_injectable(event->tp_event)))
2264 mutex_lock(&bpf_event_mutex);
2269 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2271 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2276 ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2280 /* set the new array to event->tp_event and set event->prog */
2282 event->bpf_cookie = bpf_cookie;
2283 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2284 bpf_prog_array_free_sleepable(old_array);
2287 mutex_unlock(&bpf_event_mutex);
2291 void perf_event_detach_bpf_prog(struct perf_event *event)
2293 struct bpf_prog_array *old_array;
2294 struct bpf_prog_array *new_array;
2297 mutex_lock(&bpf_event_mutex);
2302 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2303 ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2307 bpf_prog_array_delete_safe(old_array, event->prog);
2309 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2310 bpf_prog_array_free_sleepable(old_array);
2313 bpf_prog_put(event->prog);
2317 mutex_unlock(&bpf_event_mutex);
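/*
 * Attach and detach follow the same copy-and-publish pattern: build a new
 * prog_array under bpf_event_mutex, publish it with rcu_assign_pointer()
 * and hand the old array to bpf_prog_array_free_sleepable(), which delays
 * the actual free until in-flight (including sleepable) executions of the
 * old array have finished.
 */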
2320 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2322 struct perf_event_query_bpf __user *uquery = info;
2323 struct perf_event_query_bpf query = {};
2324 struct bpf_prog_array *progs;
2325 u32 *ids, prog_cnt, ids_len;
2328 if (!perfmon_capable())
2330 if (event->attr.type != PERF_TYPE_TRACEPOINT)
2332 if (copy_from_user(&query, uquery, sizeof(query)))
2335 ids_len = query.ids_len;
2336 if (ids_len > BPF_TRACE_MAX_PROGS)
2338 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2342	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2343	 * is required when the user only wants to check uquery->prog_cnt.
2344	 * There is no need to check for it explicitly since the case is
2345	 * handled gracefully in bpf_prog_array_copy_info.
2348 mutex_lock(&bpf_event_mutex);
2349 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2350 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2351 mutex_unlock(&bpf_event_mutex);
2353 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2354 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2361 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2362 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2364 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2366 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2368 for (; btp < __stop__bpf_raw_tp; btp++) {
2369 if (!strcmp(btp->tp->name, name))
2373 return bpf_get_raw_tracepoint_module(name);
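/*
 * Raw tracepoint lookup is two-level: scan the built-in
 * __start__bpf_raw_tp..__stop__bpf_raw_tp section first, then fall back to
 * bpf_get_raw_tracepoint_module(), which walks bpf_trace_modules and takes
 * a reference on the owning module; bpf_put_raw_tracepoint() drops that
 * reference again.
 */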
2376 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2381 mod = __module_address((unsigned long)btp);
2386 static __always_inline
2387 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2389 struct bpf_prog *prog = link->link.prog;
2390 struct bpf_run_ctx *old_run_ctx;
2391 struct bpf_trace_run_ctx run_ctx;
2394 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2395 bpf_prog_inc_misses_counter(prog);
2399 run_ctx.bpf_cookie = link->cookie;
2400 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2403 (void) bpf_prog_run(prog, args);
2406 bpf_reset_run_ctx(old_run_ctx);
2408 this_cpu_dec(*(prog->active));
2411 #define UNPACK(...) __VA_ARGS__
2412 #define REPEAT_1(FN, DL, X, ...) FN(X)
2413 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2414 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2415 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2416 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2417 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2418 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2419 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2420 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2421 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2422 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2423 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2424 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2426 #define SARG(X) u64 arg##X
2427 #define COPY(X) args[X] = arg##X
2429 #define __DL_COM (,)
2430 #define __DL_SEM (;)
2432 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2434 #define BPF_TRACE_DEFN_x(x) \
2435 void bpf_trace_run##x(struct bpf_raw_tp_link *link, \
2436 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2439 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2440 __bpf_trace_run(link, args); \
2442 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2443 BPF_TRACE_DEFN_x(1);
2444 BPF_TRACE_DEFN_x(2);
2445 BPF_TRACE_DEFN_x(3);
2446 BPF_TRACE_DEFN_x(4);
2447 BPF_TRACE_DEFN_x(5);
2448 BPF_TRACE_DEFN_x(6);
2449 BPF_TRACE_DEFN_x(7);
2450 BPF_TRACE_DEFN_x(8);
2451 BPF_TRACE_DEFN_x(9);
2452 BPF_TRACE_DEFN_x(10);
2453 BPF_TRACE_DEFN_x(11);
2454 BPF_TRACE_DEFN_x(12);
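/*
 * Each BPF_TRACE_DEFN_x(N) above stamps out one bpf_trace_run<N>() entry
 * point that packs its N u64 arguments into an on-stack array and hands
 * them to __bpf_trace_run(). As a rough sketch, BPF_TRACE_DEFN_x(2)
 * expands to something like:
 *
 *	void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(link, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */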
2456 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2458 struct tracepoint *tp = btp->tp;
2459 struct bpf_prog *prog = link->link.prog;
2462	 * check that the program doesn't access arguments beyond what's
2463	 * available in this tracepoint
2465 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2468 if (prog->aux->max_tp_access > btp->writable_size)
2471 return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2474 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2476 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2479 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2480 u32 *fd_type, const char **buf,
2481 u64 *probe_offset, u64 *probe_addr,
2482 unsigned long *missed)
2484 bool is_tracepoint, is_syscall_tp;
2485 struct bpf_prog *prog;
2492 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2493 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2496 *prog_id = prog->aux->id;
2497 flags = event->tp_event->flags;
2498 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2499 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2501 if (is_tracepoint || is_syscall_tp) {
2502 *buf = is_tracepoint ? event->tp_event->tp->name
2503 : event->tp_event->name;
2504 /* We allow NULL pointer for tracepoint */
2506 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2508 *probe_offset = 0x0;
2514 #ifdef CONFIG_KPROBE_EVENTS
2515 if (flags & TRACE_EVENT_FL_KPROBE)
2516 err = bpf_get_kprobe_info(event, fd_type, buf,
2517 probe_offset, probe_addr, missed,
2518 event->attr.type == PERF_TYPE_TRACEPOINT);
2520 #ifdef CONFIG_UPROBE_EVENTS
2521 if (flags & TRACE_EVENT_FL_UPROBE)
2522 err = bpf_get_uprobe_info(event, fd_type, buf,
2523 probe_offset, probe_addr,
2524 event->attr.type == PERF_TYPE_TRACEPOINT);
2531 static int __init send_signal_irq_work_init(void)
2534 struct send_signal_irq_work *work;
2536 for_each_possible_cpu(cpu) {
2537 work = per_cpu_ptr(&send_signal_work, cpu);
2538 init_irq_work(&work->irq_work, do_bpf_send_signal);
2543 subsys_initcall(send_signal_irq_work_init);
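/*
 * send_signal_irq_work_init() sets up one irq_work per possible CPU for
 * do_bpf_send_signal(), so the bpf_send_signal()/bpf_send_signal_thread()
 * helpers can defer actual signal delivery to a context where it is safe,
 * rather than raising it directly from NMI or tracing context.
 */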
2545 #ifdef CONFIG_MODULES
2546 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2549 struct bpf_trace_module *btm, *tmp;
2550 struct module *mod = module;
2553 if (mod->num_bpf_raw_events == 0 ||
2554 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2557 mutex_lock(&bpf_module_mutex);
2560 case MODULE_STATE_COMING:
2561 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2563 btm->module = module;
2564 list_add(&btm->list, &bpf_trace_modules);
2569 case MODULE_STATE_GOING:
2570 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2571 if (btm->module == module) {
2572 list_del(&btm->list);
2580 mutex_unlock(&bpf_module_mutex);
2583 return notifier_from_errno(ret);
2586 static struct notifier_block bpf_module_nb = {
2587 .notifier_call = bpf_event_notify,
2590 static int __init bpf_event_init(void)
2592 register_module_notifier(&bpf_module_nb);
2596 fs_initcall(bpf_event_init);
2597 #endif /* CONFIG_MODULES */
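/*
 * The module notifier above keeps bpf_trace_modules in sync with module
 * load/unload: a module carrying raw tracepoints is added to the list on
 * MODULE_STATE_COMING and removed on MODULE_STATE_GOING, so
 * bpf_get_raw_tracepoint_module() only ever walks live modules.
 */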
2599 struct bpf_session_run_ctx {
2600 struct bpf_run_ctx run_ctx;
2605 #ifdef CONFIG_FPROBE
2606 struct bpf_kprobe_multi_link {
2607 struct bpf_link link;
2609 unsigned long *addrs;
2613 struct module **mods;
2617 struct bpf_kprobe_multi_run_ctx {
2618 struct bpf_session_run_ctx session_ctx;
2619 struct bpf_kprobe_multi_link *link;
2620 unsigned long entry_ip;
2628 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2630 unsigned long __user usymbol;
2631 const char **syms = NULL;
2632 char *buf = NULL, *p;
2636 syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2640 buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2644 for (p = buf, i = 0; i < cnt; i++) {
2645 if (__get_user(usymbol, usyms + i)) {
2649 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2650 if (err == KSYM_NAME_LEN)
2670 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2674 for (i = 0; i < cnt; i++)
2675 module_put(mods[i]);
2678 static void free_user_syms(struct user_syms *us)
2684 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2686 struct bpf_kprobe_multi_link *kmulti_link;
2688 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2689 unregister_fprobe(&kmulti_link->fp);
2690 kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2693 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2695 struct bpf_kprobe_multi_link *kmulti_link;
2697 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2698 kvfree(kmulti_link->addrs);
2699 kvfree(kmulti_link->cookies);
2700 kfree(kmulti_link->mods);
2704 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2705 struct bpf_link_info *info)
2707 u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2708 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2709 struct bpf_kprobe_multi_link *kmulti_link;
2710 u32 ucount = info->kprobe_multi.count;
2713 if (!uaddrs ^ !ucount)
2715 if (ucookies && !ucount)
2718 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2719 info->kprobe_multi.count = kmulti_link->cnt;
2720 info->kprobe_multi.flags = kmulti_link->flags;
2721 info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2725 if (ucount < kmulti_link->cnt)
2728 ucount = kmulti_link->cnt;
2731 if (kmulti_link->cookies) {
2732 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2735 for (i = 0; i < ucount; i++) {
2736 if (put_user(0, ucookies + i))
2742 if (kallsyms_show_value(current_cred())) {
2743 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2746 for (i = 0; i < ucount; i++) {
2747 if (put_user(0, uaddrs + i))
2754 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2755 .release = bpf_kprobe_multi_link_release,
2756 .dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2757 .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2760 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2762 const struct bpf_kprobe_multi_link *link = priv;
2763 unsigned long *addr_a = a, *addr_b = b;
2764 u64 *cookie_a, *cookie_b;
2766 cookie_a = link->cookies + (addr_a - link->addrs);
2767 cookie_b = link->cookies + (addr_b - link->addrs);
2769 /* swap addr_a/addr_b and cookie_a/cookie_b values */
2770 swap(*addr_a, *addr_b);
2771 swap(*cookie_a, *cookie_b);
2774 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2776 const unsigned long *addr_a = a, *addr_b = b;
2778 if (*addr_a == *addr_b)
2780 return *addr_a < *addr_b ? -1 : 1;
2783 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2785 return bpf_kprobe_multi_addrs_cmp(a, b);
2788 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2790 struct bpf_kprobe_multi_run_ctx *run_ctx;
2791 struct bpf_kprobe_multi_link *link;
2792 u64 *cookie, entry_ip;
2793 unsigned long *addr;
2795 if (WARN_ON_ONCE(!ctx))
2797 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2798 session_ctx.run_ctx);
2799 link = run_ctx->link;
2802 entry_ip = run_ctx->entry_ip;
2803 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2804 bpf_kprobe_multi_addrs_cmp);
2807 cookie = link->cookies + (addr - link->addrs);
2811 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2813 struct bpf_kprobe_multi_run_ctx *run_ctx;
2815 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2816 session_ctx.run_ctx);
2817 return run_ctx->entry_ip;
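/*
 * Both helpers above answer from the per-invocation
 * bpf_kprobe_multi_run_ctx: entry_ip is recorded when the fprobe fires,
 * and the matching cookie is found by bsearch()ing entry_ip in
 * link->addrs, which is kept sorted together with link->cookies (see the
 * sort_r() call in bpf_kprobe_multi_link_attach()).
 */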
2821 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2822 unsigned long entry_ip, struct pt_regs *regs,
2823 bool is_return, void *data)
2825 struct bpf_kprobe_multi_run_ctx run_ctx = {
2827 .is_return = is_return,
2831 .entry_ip = entry_ip,
2833 struct bpf_run_ctx *old_run_ctx;
2836 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2837 bpf_prog_inc_misses_counter(link->link.prog);
2844 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2845 err = bpf_prog_run(link->link.prog, regs);
2846 bpf_reset_run_ctx(old_run_ctx);
2851 __this_cpu_dec(bpf_prog_active);
2856 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2857 unsigned long ret_ip, struct pt_regs *regs,
2860 struct bpf_kprobe_multi_link *link;
2863 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2864 err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false, data);
2865 return is_kprobe_session(link->link.prog) ? err : 0;
2869 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2870 unsigned long ret_ip, struct pt_regs *regs,
2873 struct bpf_kprobe_multi_link *link;
2875 link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2876 kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true, data);
2879 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2881 const char **str_a = (const char **) a;
2882 const char **str_b = (const char **) b;
2884 return strcmp(*str_a, *str_b);
2887 struct multi_symbols_sort {
2892 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2894 const struct multi_symbols_sort *data = priv;
2895 const char **name_a = a, **name_b = b;
2897 swap(*name_a, *name_b);
2899 /* If cookies are supplied, swap the related cookies as well. */
2900 if (data->cookies) {
2901 u64 *cookie_a, *cookie_b;
2903 cookie_a = data->cookies + (name_a - data->funcs);
2904 cookie_b = data->cookies + (name_b - data->funcs);
2905 swap(*cookie_a, *cookie_b);
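/*
 * symbols_cmp_r()/symbols_swap_r() let sort_r() order the user-supplied
 * symbol names while keeping each cookie glued to its symbol; the sorted
 * names are then resolved to addresses in one batch by
 * ftrace_lookup_symbols() during attach.
 */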
2909 struct modules_array {
2910 struct module **mods;
2915 static int add_module(struct modules_array *arr, struct module *mod)
2917 struct module **mods;
2919 if (arr->mods_cnt == arr->mods_cap) {
2920 arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2921 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2927 arr->mods[arr->mods_cnt] = mod;
2932 static bool has_module(struct modules_array *arr, struct module *mod)
2936 for (i = arr->mods_cnt - 1; i >= 0; i--) {
2937 if (arr->mods[i] == mod)
2943 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2945 struct modules_array arr = {};
2948 for (i = 0; i < addrs_cnt; i++) {
2952 mod = __module_address(addrs[i]);
2953 /* Either no module or it's already stored */
2954 if (!mod || has_module(&arr, mod)) {
2958 if (!try_module_get(mod))
2963 err = add_module(&arr, mod);
2970 /* We return either err < 0 in case of error, ... */
2972 kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2977 /* or number of modules found if everything is ok. */
2979 return arr.mods_cnt;
2982 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2986 for (i = 0; i < cnt; i++) {
2987 if (!within_error_injection_list(addrs[i]))
2993 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2995 struct bpf_kprobe_multi_link *link = NULL;
2996 struct bpf_link_primer link_primer;
2997 void __user *ucookies;
2998 unsigned long *addrs;
2999 u32 flags, cnt, size;
3000 void __user *uaddrs;
3001 u64 *cookies = NULL;
3005 /* no support for 32bit archs yet */
3006 if (sizeof(u64) != sizeof(void *))
3009 if (!is_kprobe_multi(prog))
3012 flags = attr->link_create.kprobe_multi.flags;
3013 if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
3016 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
3017 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
3018 if (!!uaddrs == !!usyms)
3021 cnt = attr->link_create.kprobe_multi.cnt;
3024 if (cnt > MAX_KPROBE_MULTI_CNT)
3027 size = cnt * sizeof(*addrs);
3028 addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
3032 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
3034 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
3039 if (copy_from_user(cookies, ucookies, size)) {
3046 if (copy_from_user(addrs, uaddrs, size)) {
3051 struct multi_symbols_sort data = {
3054 struct user_syms us;
3056 err = copy_user_syms(&us, usyms, cnt);
3061 data.funcs = us.syms;
3063 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
3064 symbols_swap_r, &data);
3066 err = ftrace_lookup_symbols(us.syms, cnt, addrs);
3067 free_user_syms(&us);
3072 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
3077 link = kzalloc(sizeof(*link), GFP_KERNEL);
3083 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
3084 &bpf_kprobe_multi_link_lops, prog);
3086 err = bpf_link_prime(&link->link, &link_primer);
3090 if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
3091 link->fp.entry_handler = kprobe_multi_link_handler;
3092 if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
3093 link->fp.exit_handler = kprobe_multi_link_exit_handler;
3094 if (is_kprobe_session(prog))
3095 link->fp.entry_data_size = sizeof(u64);
3097 link->addrs = addrs;
3098 link->cookies = cookies;
3100 link->flags = flags;
3104	 * Sorting the addresses will trigger sorting of the cookies as well
3105	 * (see bpf_kprobe_multi_cookie_swap). This way we can find the
3106	 * cookie based on the address in the bpf_get_attach_cookie helper.
3109 sort_r(addrs, cnt, sizeof(*addrs),
3110 bpf_kprobe_multi_cookie_cmp,
3111 bpf_kprobe_multi_cookie_swap,
3115 err = get_modules_for_addrs(&link->mods, addrs, cnt);
3117 bpf_link_cleanup(&link_primer);
3120 link->mods_cnt = err;
3122 err = register_fprobe_ips(&link->fp, addrs, cnt);
3124 kprobe_multi_put_modules(link->mods, link->mods_cnt);
3125 bpf_link_cleanup(&link_primer);
3129 return bpf_link_settle(&link_primer);
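/*
 * From user space this path is normally reached through libbpf. As an
 * illustrative sketch only (skeleton and program names are made up, libbpf
 * API per recent versions), attaching a kprobe.multi program to a couple
 * of symbols with per-symbol cookies looks roughly like:
 *
 *	const char *syms[] = { "ksys_read", "ksys_write" };
 *	__u64 cookies[] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.syms = syms,
 *		.cookies = cookies,
 *		.cnt = 2,
 *	);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti,
 *						     NULL, &opts);
 *
 * libbpf fills attr->link_create.kprobe_multi and issues BPF_LINK_CREATE,
 * which lands in bpf_kprobe_multi_link_attach() above.
 */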
3137 #else /* !CONFIG_FPROBE */
3138 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3142 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3146 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3152 #ifdef CONFIG_UPROBES
3153 struct bpf_uprobe_multi_link;
3156 struct bpf_uprobe_multi_link *link;
3158 unsigned long ref_ctr_offset;
3160 struct uprobe_consumer consumer;
3163 struct bpf_uprobe_multi_link {
3165 struct bpf_link link;
3168 struct bpf_uprobe *uprobes;
3169 struct task_struct *task;
3172 struct bpf_uprobe_multi_run_ctx {
3173 struct bpf_run_ctx run_ctx;
3174 unsigned long entry_ip;
3175 struct bpf_uprobe *uprobe;
3178 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3183 for (i = 0; i < cnt; i++) {
3184 uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3185 &uprobes[i].consumer);
3189 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3191 struct bpf_uprobe_multi_link *umulti_link;
3193 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3194 bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3195 if (umulti_link->task)
3196 put_task_struct(umulti_link->task);
3197 path_put(&umulti_link->path);
3200 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3202 struct bpf_uprobe_multi_link *umulti_link;
3204 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3205 kvfree(umulti_link->uprobes);
3209 static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3210 struct bpf_link_info *info)
3212 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3213 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3214 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3215 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3216 u32 upath_size = info->uprobe_multi.path_size;
3217 struct bpf_uprobe_multi_link *umulti_link;
3218 u32 ucount = info->uprobe_multi.count;
3222 if (!upath ^ !upath_size)
3225 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3228 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3229 info->uprobe_multi.count = umulti_link->cnt;
3230 info->uprobe_multi.flags = umulti_link->flags;
3231 info->uprobe_multi.pid = umulti_link->task ?
3232 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3237 upath_size = min_t(u32, upath_size, PATH_MAX);
3239 buf = kmalloc(upath_size, GFP_KERNEL);
3242 p = d_path(&umulti_link->path, buf, upath_size);
3247 upath_size = buf + upath_size - p;
3248 left = copy_to_user(upath, p, upath_size);
3252 info->uprobe_multi.path_size = upath_size;
3255 if (!uoffsets && !ucookies && !uref_ctr_offsets)
3258 if (ucount < umulti_link->cnt)
3261 ucount = umulti_link->cnt;
3263 for (i = 0; i < ucount; i++) {
3265 put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3267 if (uref_ctr_offsets &&
3268 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3271 put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3278 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3279 .release = bpf_uprobe_multi_link_release,
3280 .dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3281 .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3284 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3285 unsigned long entry_ip,
3286 struct pt_regs *regs)
3288 struct bpf_uprobe_multi_link *link = uprobe->link;
3289 struct bpf_uprobe_multi_run_ctx run_ctx = {
3290 .entry_ip = entry_ip,
3293 struct bpf_prog *prog = link->link.prog;
3294 bool sleepable = prog->sleepable;
3295 struct bpf_run_ctx *old_run_ctx;
3298 if (link->task && current != link->task)
3302 rcu_read_lock_trace();
3308 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3309 err = bpf_prog_run(link->link.prog, regs);
3310 bpf_reset_run_ctx(old_run_ctx);
3315 rcu_read_unlock_trace();
3322 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3323 struct mm_struct *mm)
3325 struct bpf_uprobe *uprobe;
3327 uprobe = container_of(con, struct bpf_uprobe, consumer);
3328 return uprobe->link->task->mm == mm;
3332 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3334 struct bpf_uprobe *uprobe;
3336 uprobe = container_of(con, struct bpf_uprobe, consumer);
3337 return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3341 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3343 struct bpf_uprobe *uprobe;
3345 uprobe = container_of(con, struct bpf_uprobe, consumer);
3346 return uprobe_prog_run(uprobe, func, regs);
3349 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3351 struct bpf_uprobe_multi_run_ctx *run_ctx;
3353 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3354 return run_ctx->entry_ip;
3357 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3359 struct bpf_uprobe_multi_run_ctx *run_ctx;
3361 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3362 return run_ctx->uprobe->cookie;
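/*
 * As with the kprobe-multi variant, the uprobe handlers stash the probed
 * address and the struct bpf_uprobe that fired in a
 * bpf_uprobe_multi_run_ctx, so the helpers exposing the function IP and
 * the attach cookie can read them back without any extra lookup.
 */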
3365 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3367 struct bpf_uprobe_multi_link *link = NULL;
3368 unsigned long __user *uref_ctr_offsets;
3369 struct bpf_link_primer link_primer;
3370 struct bpf_uprobe *uprobes = NULL;
3371 struct task_struct *task = NULL;
3372 unsigned long __user *uoffsets;
3373 u64 __user *ucookies;
3381 /* no support for 32bit archs yet */
3382 if (sizeof(u64) != sizeof(void *))
3385 if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3388 flags = attr->link_create.uprobe_multi.flags;
3389 if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3393 * path, offsets and cnt are mandatory,
3394 * ref_ctr_offsets and cookies are optional
3396 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3397 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3398 cnt = attr->link_create.uprobe_multi.cnt;
3400 if (!upath || !uoffsets || !cnt)
3402 if (cnt > MAX_UPROBE_MULTI_CNT)
3405 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3406 ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3408 name = strndup_user(upath, PATH_MAX);
3410 err = PTR_ERR(name);
3414 err = kern_path(name, LOOKUP_FOLLOW, &path);
3419 if (!d_is_reg(path.dentry)) {
3421 goto error_path_put;
3424 pid = attr->link_create.uprobe_multi.pid;
3427 task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3431 goto error_path_put;
3437 link = kzalloc(sizeof(*link), GFP_KERNEL);
3438 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3440 if (!uprobes || !link)
3443 for (i = 0; i < cnt; i++) {
3444 if (__get_user(uprobes[i].offset, uoffsets + i)) {
3448 if (uprobes[i].offset < 0) {
3452 if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3456 if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3461 uprobes[i].link = link;
3463 if (flags & BPF_F_UPROBE_MULTI_RETURN)
3464 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3466 uprobes[i].consumer.handler = uprobe_multi_link_handler;
3469 uprobes[i].consumer.filter = uprobe_multi_link_filter;
3473 link->uprobes = uprobes;
3476 link->flags = flags;
3478 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3479 &bpf_uprobe_multi_link_lops, prog);
3481 for (i = 0; i < cnt; i++) {
3482 err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3484 uprobes[i].ref_ctr_offset,
3485 &uprobes[i].consumer);
3487 bpf_uprobe_unregister(&path, uprobes, i);
3492 err = bpf_link_prime(&link->link, &link_primer);
3496 return bpf_link_settle(&link_primer);
3502 put_task_struct(task);
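/*
 * Summary of the attach path above: resolve the user-supplied path with
 * kern_path(), optionally pin a target task when a pid filter is given,
 * copy per-probe offsets/ref_ctr_offsets/cookies from user memory, and
 * register one uprobe consumer per offset on the file's inode. A failure
 * after partial registration unwinds via bpf_uprobe_unregister() and then
 * drops the task, path and allocations on the error path.
 */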
3507 #else /* !CONFIG_UPROBES */
3508 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3512 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3516 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3520 #endif /* CONFIG_UPROBES */
3522 #ifdef CONFIG_FPROBE
3523 __bpf_kfunc_start_defs();
3525 __bpf_kfunc bool bpf_session_is_return(void)
3527 struct bpf_session_run_ctx *session_ctx;
3529 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3530 return session_ctx->is_return;
3533 __bpf_kfunc __u64 *bpf_session_cookie(void)
3535 struct bpf_session_run_ctx *session_ctx;
3537 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3538 return session_ctx->data;
3541 __bpf_kfunc_end_defs();
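/*
 * bpf_session_is_return() and bpf_session_cookie() are kfuncs reserved for
 * kprobe session programs (enforced by bpf_kprobe_multi_filter() below).
 * A minimal, purely illustrative use from the BPF program side, assuming
 * the kfuncs are declared as __ksym externs and recent libbpf section
 * naming:
 *
 *	extern bool bpf_session_is_return(void) __ksym;
 *	extern __u64 *bpf_session_cookie(void) __ksym;
 *
 *	SEC("kprobe.session/ksys_read")
 *	int handle(struct pt_regs *ctx)
 *	{
 *		__u64 *cookie = bpf_session_cookie();
 *
 *		if (bpf_session_is_return())
 *			bpf_printk("return, cookie %llu", *cookie);
 *		return 0;
 *	}
 */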
3543 BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3544 BTF_ID_FLAGS(func, bpf_session_is_return)
3545 BTF_ID_FLAGS(func, bpf_session_cookie)
3546 BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3548 static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3550 if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3553 if (!is_kprobe_session(prog))
3559 static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3560 .owner = THIS_MODULE,
3561 .set = &kprobe_multi_kfunc_set_ids,
3562 .filter = bpf_kprobe_multi_filter,
3565 static int __init bpf_kprobe_multi_kfuncs_init(void)
3567 return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3570 late_initcall(bpf_kprobe_multi_kfuncs_init);