// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
	 * we enter trace_call_bpf() and do the proper rcu_dereference()
	 * under the RCU lock. If it turns out that prog_array is NULL
	 * then we bail out. Conversely, if the fetched pointer was NULL,
	 * the prog_array is skipped with the risk of missing events that
	 * were attached between that check and the rcu_dereference(),
	 * which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

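/*
 * Illustrative sketch (not part of this file): a kprobe BPF program whose
 * return value trace_call_bpf() interprets as described above. Probe target
 * and pid value are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_open(struct pt_regs *ctx)
 *	{
 *		// Keep the event only for one pid; any non-zero return
 *		// stores the kprobe event into the ring buffer.
 *		if ((bpf_get_current_pid_tgid() >> 32) != 1234)
 *			return 0;	// event is filtered out
 *		return 1;		// event is stored
 *	}
 */
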
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
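/*
 * Illustrative sketch (not part of this file): forcing an error return from
 * a kprobed function that is marked for error injection. Assumes the target
 * carries an ALLOW_ERROR_INJECTION annotation; names are hypothetical.
 *
 *	SEC("kprobe/should_failslab")
 *	int inject_enomem(struct pt_regs *ctx)
 *	{
 *		bpf_override_return(ctx, -ENOMEM);	// fail the allocation
 *		return 0;
 *	}
 */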

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

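/*
 * Illustrative sketch (not part of this file): reading user memory through
 * this helper from a kprobe program. Probe target and names are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		char fname[64];
 *
 *		if (bpf_probe_read_user(fname, sizeof(fname), uptr) < 0)
 *			return 0;	// fname was zeroed on failure
 *		return 0;
 *	}
 */
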
static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

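/*
 * Illustrative sketch (not part of this file): BPF-program-side use of this
 * helper, limited to MAX_TRACE_PRINTK_VARARGS (3) arguments. Output goes to
 * the bpf_trace/bpf_trace_printk tracepoint enabled below (and shows up in
 * trace_pipe). Variable names are hypothetical.
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */
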
static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

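/*
 * Illustrative sketch (not part of this file): the vprintk variant takes its
 * arguments as an array of u64s, so data_len must be a multiple of 8 (checked
 * above). Variable names are hypothetical.
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	u64 args[] = { pid, (u64)(long)comm };
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */
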
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

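/*
 * Illustrative sketch (not part of this file): using the seq helpers from a
 * BPF iterator program, where the seq_file comes from the iterator context.
 * Program and field usage are hypothetical.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		char fmt[] = "%d\n";
 *		u64 data[1];
 *
 *		if (!task)
 *			return 0;
 *		data[0] = task->tgid;
 *		bpf_seq_printf(ctx->meta->seq, fmt, sizeof(fmt),
 *			       data, sizeof(data));
 *		return 0;
 *	}
 */
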
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

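/*
 * Illustrative sketch (not part of this file): reading a counter from a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY on the current CPU. The _value variant is
 * preferable to bpf_perf_event_read(), whose return value conflates small
 * negative counter values with errors (see comment above). The map name is
 * hypothetical.
 *
 *	struct bpf_perf_event_value v = {};
 *	long err;
 *
 *	err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					&v, sizeof(v));
 *	if (!err) {
 *		// v.counter, v.enabled and v.running are valid here
 *	}
 */
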
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

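/*
 * Illustrative sketch (not part of this file): emitting an event from a BPF
 * program through a BPF_MAP_TYPE_PERF_EVENT_ARRAY. The map and struct names
 * are hypothetical.
 *
 *	struct event { u32 pid; u32 cpu; };
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.cpu = bpf_get_smp_processor_id(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */
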
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

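/*
 * Illustrative sketch (not part of this file): signalling the current task
 * from a tracing program. bpf_send_signal() targets the whole thread group
 * (PIDTYPE_TGID above), bpf_send_signal_thread() only the current thread.
 * Probe target and uid value are hypothetical.
 *
 *	SEC("kprobe/some_traced_func")
 *	int enforce(struct pt_regs *ctx)
 *	{
 *		if ((u32)bpf_get_current_uid_gid() == 1000)
 *			bpf_send_signal(SIGKILL);
 *		return 0;
 *	}
 */
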
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

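/*
 * Illustrative sketch (not part of this file): resolving a path name from a
 * hook on the bpf_d_path allowlist above. Program name is hypothetical.
 *
 *	SEC("fentry/vfs_getattr")
 *	int BPF_PROG(show_path, const struct path *path)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path((struct path *)path, buf, sizeof(buf)) > 0)
 *			bpf_printk("path: %s", buf);
 *		return 0;
 *	}
 */
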
#define BTF_F_ALL	(BTF_F_COMPACT | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

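/*
 * Illustrative sketch (not part of this file): rendering a kernel struct as
 * text via vmlinux BTF. In practice the type_id is usually obtained with
 * libbpf's bpf_core_type_id_kernel(); names below are hypothetical.
 *
 *	struct btf_ptr p = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	char out[512];
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 */
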
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* Being extra safe in here in case entry ip is on the page-edge. */
	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
		return fentry_ip;
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

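/*
 * Layout sketch (derived from the helpers above and from
 * bpf_get_func_ip_tracing(), not an authoritative ABI statement) of the
 * u64 array behind the tracing program context pointer:
 *
 *	ctx[-2]		traced function IP	(bpf_get_func_ip)
 *	ctx[-1]		number of arguments	(get_func_arg_cnt)
 *	ctx[0..n-1]	arguments		(get_func_arg)
 *	ctx[n]		return value, fexit	(get_func_ret)
 */
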
#ifdef CONFIG_KEYS
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "kfuncs which will be used in BPF programs");

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 * NULL pointer otherwise.
 */
struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 * pre-determined ID on success, a NULL pointer otherwise
 */
struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
			       struct bpf_dynptr_kern *sig_ptr,
			       struct bpf_key *trusted_keyring)
{
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	return verify_pkcs7_signature(data_ptr->data,
				      bpf_dynptr_get_size(data_ptr),
				      sig_ptr->data,
				      bpf_dynptr_get_size(sig_ptr),
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__diag_pop();

BTF_SET8_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_SET8_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */

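/*
 * Illustrative sketch (not part of this file): verifying a PKCS#7 signature
 * from a sleepable BPF program using the kfuncs registered above. The dynptrs
 * data_ptr/sig_ptr are assumed to have been built by the caller.
 *
 *	struct bpf_key *trusted;
 *	long err;
 *
 *	trusted = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	if (!trusted)
 *		return -ENOENT;
 *	err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted);
 *	bpf_key_put(trusted);	// KF_RELEASE: always drop the reference
 */
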
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_copy_from_user_task:
		return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		return &bpf_task_storage_get_recur_proto;
	case BPF_FUNC_task_storage_delete:
		return &bpf_task_storage_delete_recur_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_func_ip_proto_kprobe_multi :
			&bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
			&bpf_get_attach_cookie_proto_kmulti :
			&bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

5e43f899
AI
1737static const struct bpf_func_proto *
1738pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1739{
1740 switch (func_id) {
1741 case BPF_FUNC_perf_event_output:
9940d67c 1742 return &bpf_perf_event_output_proto_tp;
9fd82b61 1743 case BPF_FUNC_get_stackid:
7b04d6d6 1744 return &bpf_get_stackid_proto_pe;
c195651e 1745 case BPF_FUNC_get_stack:
7b04d6d6 1746 return &bpf_get_stack_proto_pe;
4bebdc7a 1747 case BPF_FUNC_perf_prog_read_value:
f005afed 1748 return &bpf_perf_prog_read_value_proto;
fff7b643
DX
1749 case BPF_FUNC_read_branch_records:
1750 return &bpf_read_branch_records_proto;
7adfc6c9
AN
1751 case BPF_FUNC_get_attach_cookie:
1752 return &bpf_get_attach_cookie_proto_pe;
9fd82b61 1753 default:
fc611f47 1754 return bpf_tracing_func_proto(func_id, prog);
9fd82b61
AS
1755 }
1756}
1757
c4f6699d
AS
1758/*
1759 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1760 * to avoid potential recursive reuse issue when/if tracepoints are added
9594dc3c
MM
1761 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1762 *
1763 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1764 * in normal, irq, and nmi context.
c4f6699d 1765 */
9594dc3c
MM
1766struct bpf_raw_tp_regs {
1767 struct pt_regs regs[3];
1768};
1769static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1770static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1771static struct pt_regs *get_bpf_raw_tp_regs(void)
1772{
1773 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1774 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1775
1776 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1777 this_cpu_dec(bpf_raw_tp_nest_level);
1778 return ERR_PTR(-EBUSY);
1779 }
1780
1781 return &tp_regs->regs[nest_level - 1];
1782}
1783
1784static void put_bpf_raw_tp_regs(void)
1785{
1786 this_cpu_dec(bpf_raw_tp_nest_level);
1787}
1788
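/*
 * The raw_tp helpers below all follow the same pattern: claim a pt_regs
 * slot with get_bpf_raw_tp_regs() (one slot per nesting level: task,
 * irq, nmi), bail out with -EBUSY on over-nesting, and release the slot
 * with put_bpf_raw_tp_regs() when done.
 */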
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

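/*
 * BPF_PROG_TYPE_TRACING programs get the widest helper selection below:
 * the CONFIG_NET-only casting and socket-storage helpers, the seq_*
 * helpers only when attached as BPF_TRACE_ITER, the func_arg/cookie
 * helpers only when the program runs from a trampoline, and, failing
 * all that, whatever raw_tp programs get.
 */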
const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_skc_to_mptcp_sock:
		return &bpf_skc_to_mptcp_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
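	/*
	 * Misaligned access is only tolerated on 32-bit architectures,
	 * and there only for 8-byte reads split at a 4-byte boundary;
	 * everything else must be naturally aligned.
	 */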
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

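/*
 * Loads from struct bpf_perf_event_data are rewritten into two loads
 * through struct bpf_perf_event_data_kern: first the data (or regs)
 * pointer is fetched from the kernel-side context, then the requested
 * field is loaded through that pointer.
 */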
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

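/*
 * Attach/detach never modify a prog_array in place: a copy with the
 * program added (or removed) is built under bpf_event_mutex, published
 * with rcu_assign_pointer(), and the old array is freed through
 * bpf_prog_array_free_sleepable() so that sleepable programs can finish
 * with it first.
 */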
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is at function entry,
	 * and only if the probed function is on the error-injection
	 * opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

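/*
 * A tracepoint may fire again while its BPF program is still running on
 * the same CPU (e.g. from an irq, or from a tracepoint reachable inside
 * the program's own helpers). The per-program, per-CPU 'active' counter
 * below drops such recursive invocations instead of re-entering the
 * program, and accounts them via bpf_prog_inc_misses_counter().
 */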
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
out:
	this_cpu_dec(*(prog->active));
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

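/*
 * BPF_TRACE_DEFN_x(x) stamps out one exported trampoline per arity that
 * packs its scalar arguments into an array for __bpf_trace_run(). For
 * example, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */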
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

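/*
 * Modules can carry their own raw tracepoints. The notifier below keeps
 * the bpf_trace_modules list in sync with module load/unload so that
 * raw tracepoint names living in module, rather than vmlinux, sections
 * can still be resolved by name.
 */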
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

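/*
 * The run_ctx above lives on the stack for the duration of each program
 * invocation and is published via current->bpf_ctx (see
 * kprobe_multi_link_prog_run() below), so that helpers such as
 * bpf_get_attach_cookie() can recover the link and entry IP without
 * them being part of the program's context.
 */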
struct user_syms {
	const char **syms;
	char *buf;
};

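/*
 * copy_user_syms() pulls an array of user-space symbol-name pointers
 * into the kernel: all strings are packed back to back into us->buf,
 * and us->syms[i] points at the i-th copied name inside that buffer.
 */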
static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	if (err) {
		kvfree(syms);
		kvfree(buf);
	}
	return err;
}

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}

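/*
 * Because addrs and cookies are kept sorted by address (see the sort_r()
 * call in bpf_kprobe_multi_link_attach() below), the cookie for the
 * current probe can be found with a binary search on the entry IP and
 * the matching index into the parallel cookies array.
 */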
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}

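/*
 * Like trace_call_bpf(), kprobe_multi_link_prog_run() below uses the
 * per-CPU bpf_prog_active counter to suppress re-entrant invocations of
 * the program, and installs the run_ctx so the cookie/entry-IP helpers
 * above can find it for the duration of the run.
 */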
static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

 out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If defined, swap also related cookies. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}

struct module_addr_args {
	unsigned long *addrs;
	u32 addrs_cnt;
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int module_callback(void *data, const char *name,
			   struct module *mod, unsigned long addr)
{
	struct module_addr_args *args = data;
	struct module **mods;

	/* We iterate over all module symbols and for each we:
	 * - search for it in the provided addresses array
	 * - if found we check if we already have the module pointer stored
	 *   (we iterate modules sequentially, so we can check just the last
	 *   module pointer)
	 * - take module reference and store it
	 */
	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
		     bpf_kprobe_multi_addrs_cmp))
		return 0;

	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
		return 0;

	if (args->mods_cnt == args->mods_cap) {
		args->mods_cap = max(16, args->mods_cap * 3 / 2);
		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		args->mods = mods;
	}

	if (!try_module_get(mod))
		return -EINVAL;

	args->mods[args->mods_cnt] = mod;
	args->mods_cnt++;
	return 0;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct module_addr_args args = {
		.addrs     = addrs,
		.addrs_cnt = addrs_cnt,
	};
	int err;

	/* We return either err < 0 in case of error, ... */
	err = module_kallsyms_on_each_symbol(module_callback, &args);
	if (err) {
		kprobe_multi_put_modules(args.mods, args.mods_cnt);
		kfree(args.mods);
		return err;
	}

	/* or number of modules found if everything is ok. */
	*mods = args.mods;
	return args.mods_cnt;
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find cookie based on the address in bpf_get_attach_cookie
		 * helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	} else {
		/*
		 * We need to sort addrs array even if there are no cookies
		 * provided, to allow bsearch in get_modules_for_addrs.
		 */
		sort(addrs, cnt, sizeof(*addrs),
		     bpf_kprobe_multi_addrs_cmp, NULL);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif