kernel/trace/bpf_trace.c [linux-2.6-block.git]
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * In the opposite case, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events if it was updated in between this check and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

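/*
 * Usage sketch (editor's illustration, not part of this file): a program
 * only gets bpf_probe_write_user() if the loader has CAP_SYS_ADMIN and
 * lockdown allows it; see bpf_tracing_func_proto() further below. A
 * minimal kprobe program, assuming libbpf's bpf_helpers.h/bpf_tracing.h;
 * the attach point and names are hypothetical:
 *
 *	char LICENSE[] SEC("license") = "GPL";	// helper is gpl_only
 *
 *	SEC("kprobe/ksys_write")
 *	int BPF_KPROBE(poke_user_buf, unsigned int fd, const char *ubuf)
 *	{
 *		char nul = '\0';
 *
 *		// returns -EPERM from kthreads, IRQ context, etc. (see above)
 *		bpf_probe_write_user((void *)ubuf, &nul, sizeof(nul));
 *		return 0;
 *	}
 */
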
static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
};

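/*
 * Usage sketch (editor's illustration, not part of this file):
 * bpf_trace_printk() takes a verifier-checked format string and at most
 * MAX_TRACE_PRINTK_VARARGS (3) arguments; output is emitted through the
 * bpf_trace/bpf_trace_printk tracepoint enabled below. Assuming libbpf's
 * bpf_helpers.h; the probe target is hypothetical:
 *
 *	char LICENSE[] SEC("license") = "GPL";	// helper is gpl_only
 *
 *	SEC("kprobe/do_nanosleep")
 *	int trace_sleep(void *ctx)
 *	{
 *		const char fmt[] = "pid %d entered do_nanosleep\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 (__u32)bpf_get_current_pid_tgid());
 *		return 0;
 *	}
 */
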
static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time, as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func = bpf_trace_vprintk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

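/*
 * Usage sketch (editor's illustration): bpf_trace_vprintk() lifts the
 * three-argument limit by taking the varargs as an array of u64s whose
 * byte length must be a multiple of 8, as checked above. libbpf's
 * bpf_printk() macro switches to this helper automatically when given
 * more than three arguments:
 *
 *	static const char fmt[] = "%d %d %d %d\n";
 *	__u64 args[] = { 1, 2, 3, 4 };
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */
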
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func = bpf_seq_printf_btf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

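/*
 * Usage sketch (editor's illustration): the bpf_seq_*() helpers are for
 * BPF iterator programs, whose context carries the seq_file backing the
 * iterator's read(2). BPF_SEQ_PRINTF() below is libbpf's wrapper (from
 * bpf_tracing.h) that packs the argument array for bpf_seq_printf();
 * the program name is hypothetical:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(m, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */
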
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

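/*
 * Usage sketch (editor's illustration): user space opens one hardware or
 * software perf counter per CPU and stores the fds in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY; the program then samples the counter of
 * the CPU it runs on. Map and section names are hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 64);
 *	} counters SEC(".maps");
 *
 *	SEC("kprobe/do_nanosleep")
 *	int sample_counter(void *ctx)
 *	{
 *		__s64 val = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 *		// NB: small negative counter values overlap error codes (see
 *		// the comment above); bpf_perf_event_read_value() avoids that
 *		return 0;
 *	}
 */
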
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;
	sd->sample_flags |= PERF_SAMPLE_RAW;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

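/*
 * Usage sketch (editor's illustration): streaming per-event records to
 * user space through a perf ring buffer, the usual pattern before
 * BPF_MAP_TYPE_RINGBUF existed. Struct and map names are hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(__u32));
 *	} events SEC(".maps");
 *
 *	struct event { __u32 pid; };
 *
 *	SEC("kprobe/do_unlinkat")
 *	int emit_unlink(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */
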
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;
	sd->sample_flags |= PERF_SAMPLE_RAW;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func = bpf_get_current_task_btf,
	.gpl_only = true,
	.ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func = bpf_task_pt_regs,
	.gpl_only = true,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user(), the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of the signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

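/*
 * Usage sketch (editor's illustration): bpf_send_signal() signals the
 * current process (TGID), bpf_send_signal_thread() the current thread.
 * A crude enforcement probe; the attach point and policy are
 * hypothetical:
 *
 *	SEC("kprobe/ksys_unshare")
 *	int deny_unshare(struct pt_regs *ctx)
 *	{
 *		// 9 == SIGKILL; fails with -EPERM for kthreads or when
 *		// uaccess is not possible, as implemented above
 *		bpf_send_signal(9);
 *		return 0;
 *	}
 */
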
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};

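/*
 * Usage sketch (editor's illustration): bpf_d_path() is gated by
 * bpf_d_path_allowed() above, so it is usable from BPF_TRACE_ITER,
 * sleepable LSM hooks, and the allowlisted functions such as
 * vfs_truncate. Assuming libbpf's bpf_tracing.h and vmlinux.h:
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(trace_truncate, const struct path *path, loff_t length)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path((struct path *)path, buf, sizeof(buf)) > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */
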
#define BTF_F_ALL	(BTF_F_COMPACT | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};

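/*
 * Usage sketch (editor's illustration): bpf_snprintf_btf() pretty-prints
 * any kernel object whose BTF type id is known.
 * bpf_core_type_id_kernel() comes from libbpf's bpf_core_read.h; the
 * remaining names are hypothetical:
 *
 *	SEC("fentry/do_nanosleep")
 *	int BPF_PROG(show_task)
 *	{
 *		struct btf_ptr ptr = {
 *			.ptr = bpf_get_current_task_btf(),
 *			.type_id = bpf_core_type_id_kernel(struct task_struct),
 *		};
 *		static char out[4096];
 *
 *		bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
 *				 BTF_F_COMPACT);
 *		return 0;
 *	}
 */
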
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func = bpf_get_func_ip_tracing,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* Being extra safe in here in case entry ip is on the page-edge. */
	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
		return fentry_ip;
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func = bpf_get_func_ip_kprobe,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func = bpf_get_func_ip_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func = bpf_get_attach_cookie_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func = bpf_get_attach_cookie_trace,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func = bpf_get_attach_cookie_pe,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func = bpf_get_attach_cookie_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func = bpf_get_branch_snapshot,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func = get_func_arg,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func = get_func_ret,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func = get_func_arg_cnt,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

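/*
 * Usage sketch (editor's illustration): in fentry/fexit programs the
 * context is the traced function's argument array, which these helpers
 * read by index (bpf_get_func_ret() is only valid where a return value
 * exists, e.g. fexit). The attach point is hypothetical:
 *
 *	SEC("fexit/do_unlinkat")
 *	int BPF_PROG(inspect)
 *	{
 *		__u64 nr_args = bpf_get_func_arg_cnt(ctx);
 *		__u64 arg0 = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);	// first argument
 *		bpf_get_func_ret(ctx, &ret);		// return value
 *		return 0;
 *	}
 */
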
#ifdef CONFIG_KEYS
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "kfuncs which will be used in BPF programs");

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search for a key with the given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. A key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
			       struct bpf_dynptr_kern *sig_ptr,
			       struct bpf_key *trusted_keyring)
{
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	return verify_pkcs7_signature(data_ptr->data,
				      bpf_dynptr_get_size(data_ptr),
				      sig_ptr->data,
				      bpf_dynptr_get_size(sig_ptr),
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

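/*
 * Usage sketch (editor's illustration): these kfuncs are registered for
 * tracing-style programs below; bpf_lookup_user_key() is KF_SLEEPABLE and
 * thus needs a sleepable program. Declarations go through __ksym; the
 * key serial and program name are hypothetical:
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *key) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_key, int cmd)
 *	{
 *		struct bpf_key *bkey = bpf_lookup_user_key(0x12345678, 0);
 *
 *		if (!bkey)
 *			return 0;
 *		// ... e.g. pass bkey to bpf_verify_pkcs7_signature() ...
 *		bpf_key_put(bkey);
 *		return 0;
 *	}
 */
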
f3cf4134
RS
1361__diag_pop();
1362
1363BTF_SET8_START(key_sig_kfunc_set)
1364BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1365BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1366BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
865b0566
RS
1367#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1368BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1369#endif
f3cf4134
RS
1370BTF_SET8_END(key_sig_kfunc_set)
1371
1372static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1373 .owner = THIS_MODULE,
1374 .set = &key_sig_kfunc_set,
1375};
1376
1377static int __init bpf_key_sig_kfuncs_init(void)
1378{
1379 return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1380 &bpf_key_sig_kfunc_set);
1381}
1382
1383late_initcall(bpf_key_sig_kfuncs_init);
1384#endif /* CONFIG_KEYS */
1385
7adfc6c9 1386static const struct bpf_func_proto *
fc611f47 1387bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2541517c
AS
1388{
1389 switch (func_id) {
1390 case BPF_FUNC_map_lookup_elem:
1391 return &bpf_map_lookup_elem_proto;
1392 case BPF_FUNC_map_update_elem:
1393 return &bpf_map_update_elem_proto;
1394 case BPF_FUNC_map_delete_elem:
1395 return &bpf_map_delete_elem_proto;
02a8c817
AC
1396 case BPF_FUNC_map_push_elem:
1397 return &bpf_map_push_elem_proto;
1398 case BPF_FUNC_map_pop_elem:
1399 return &bpf_map_pop_elem_proto;
1400 case BPF_FUNC_map_peek_elem:
1401 return &bpf_map_peek_elem_proto;
07343110
FZ
1402 case BPF_FUNC_map_lookup_percpu_elem:
1403 return &bpf_map_lookup_percpu_elem_proto;
d9847d31
AS
1404 case BPF_FUNC_ktime_get_ns:
1405 return &bpf_ktime_get_ns_proto;
71d19214
MÅ»
1406 case BPF_FUNC_ktime_get_boot_ns:
1407 return &bpf_ktime_get_boot_ns_proto;
04fd61ab
AS
1408 case BPF_FUNC_tail_call:
1409 return &bpf_tail_call_proto;
ffeedafb
AS
1410 case BPF_FUNC_get_current_pid_tgid:
1411 return &bpf_get_current_pid_tgid_proto;
606274c5
AS
1412 case BPF_FUNC_get_current_task:
1413 return &bpf_get_current_task_proto;
3ca1032a
KS
1414 case BPF_FUNC_get_current_task_btf:
1415 return &bpf_get_current_task_btf_proto;
dd6e10fb
DX
1416 case BPF_FUNC_task_pt_regs:
1417 return &bpf_task_pt_regs_proto;
ffeedafb
AS
1418 case BPF_FUNC_get_current_uid_gid:
1419 return &bpf_get_current_uid_gid_proto;
1420 case BPF_FUNC_get_current_comm:
1421 return &bpf_get_current_comm_proto;
9c959c86 1422 case BPF_FUNC_trace_printk:
0756ea3e 1423 return bpf_get_trace_printk_proto();
ab1973d3
AS
1424 case BPF_FUNC_get_smp_processor_id:
1425 return &bpf_get_smp_processor_id_proto;
2d0e30c3
DB
1426 case BPF_FUNC_get_numa_node_id:
1427 return &bpf_get_numa_node_id_proto;
35578d79
KX
1428 case BPF_FUNC_perf_event_read:
1429 return &bpf_perf_event_read_proto;
60d20f91
SD
1430 case BPF_FUNC_current_task_under_cgroup:
1431 return &bpf_current_task_under_cgroup_proto;
8937bd80
AS
1432 case BPF_FUNC_get_prandom_u32:
1433 return &bpf_get_prandom_u32_proto;
51e1bb9e
DB
1434 case BPF_FUNC_probe_write_user:
1435 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1436 NULL : bpf_get_probe_write_proto();
6ae08ae3
DB
1437 case BPF_FUNC_probe_read_user:
1438 return &bpf_probe_read_user_proto;
1439 case BPF_FUNC_probe_read_kernel:
71330842 1440 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1441 NULL : &bpf_probe_read_kernel_proto;
6ae08ae3
DB
1442 case BPF_FUNC_probe_read_user_str:
1443 return &bpf_probe_read_user_str_proto;
1444 case BPF_FUNC_probe_read_kernel_str:
71330842 1445 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1446 NULL : &bpf_probe_read_kernel_str_proto;
0ebeea8c
DB
1447#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1448 case BPF_FUNC_probe_read:
71330842 1449 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1450 NULL : &bpf_probe_read_compat_proto;
a5e8c070 1451 case BPF_FUNC_probe_read_str:
71330842 1452 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1453 NULL : &bpf_probe_read_compat_str_proto;
0ebeea8c 1454#endif
34ea38ca 1455#ifdef CONFIG_CGROUPS
bf6fa2c8
YS
1456 case BPF_FUNC_get_current_cgroup_id:
1457 return &bpf_get_current_cgroup_id_proto;
95b861a7
NK
1458 case BPF_FUNC_get_current_ancestor_cgroup_id:
1459 return &bpf_get_current_ancestor_cgroup_id_proto;
c4bcfb38
YS
1460 case BPF_FUNC_cgrp_storage_get:
1461 return &bpf_cgrp_storage_get_proto;
1462 case BPF_FUNC_cgrp_storage_delete:
1463 return &bpf_cgrp_storage_delete_proto;
34ea38ca 1464#endif
8b401f9e
YS
1465 case BPF_FUNC_send_signal:
1466 return &bpf_send_signal_proto;
8482941f
YS
1467 case BPF_FUNC_send_signal_thread:
1468 return &bpf_send_signal_thread_proto;
b80b033b
SL
1469 case BPF_FUNC_perf_event_read_value:
1470 return &bpf_perf_event_read_value_proto;
b4490c5c
CN
1471 case BPF_FUNC_get_ns_current_pid_tgid:
1472 return &bpf_get_ns_current_pid_tgid_proto;
457f4436
AN
1473 case BPF_FUNC_ringbuf_output:
1474 return &bpf_ringbuf_output_proto;
1475 case BPF_FUNC_ringbuf_reserve:
1476 return &bpf_ringbuf_reserve_proto;
1477 case BPF_FUNC_ringbuf_submit:
1478 return &bpf_ringbuf_submit_proto;
1479 case BPF_FUNC_ringbuf_discard:
1480 return &bpf_ringbuf_discard_proto;
1481 case BPF_FUNC_ringbuf_query:
1482 return &bpf_ringbuf_query_proto;
72e2b2b6
YS
1483 case BPF_FUNC_jiffies64:
1484 return &bpf_jiffies64_proto;
fa28dcb8
SL
1485 case BPF_FUNC_get_task_stack:
1486 return &bpf_get_task_stack_proto;
07be4c4a
AS
1487 case BPF_FUNC_copy_from_user:
1488 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
376040e4
KY
1489 case BPF_FUNC_copy_from_user_task:
1490 return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL;
c4d0bfb4
AM
1491 case BPF_FUNC_snprintf_btf:
1492 return &bpf_snprintf_btf_proto;
b7906b70 1493 case BPF_FUNC_per_cpu_ptr:
eaa6bcb7 1494 return &bpf_per_cpu_ptr_proto;
b7906b70 1495 case BPF_FUNC_this_cpu_ptr:
63d9b80d 1496 return &bpf_this_cpu_ptr_proto;
a10787e6 1497 case BPF_FUNC_task_storage_get:
4279adb0
MKL
1498 if (bpf_prog_check_recur(prog))
1499 return &bpf_task_storage_get_recur_proto;
a10787e6
SL
1500 return &bpf_task_storage_get_proto;
1501 case BPF_FUNC_task_storage_delete:
8a7dac37
MKL
1502 if (bpf_prog_check_recur(prog))
1503 return &bpf_task_storage_delete_recur_proto;
a10787e6 1504 return &bpf_task_storage_delete_proto;
69c087ba
YS
1505 case BPF_FUNC_for_each_map_elem:
1506 return &bpf_for_each_map_elem_proto;
7b15523a
FR
1507 case BPF_FUNC_snprintf:
1508 return &bpf_snprintf_proto;
9b99edca
JO
1509 case BPF_FUNC_get_func_ip:
1510 return &bpf_get_func_ip_proto_tracing;
856c02db
SL
1511 case BPF_FUNC_get_branch_snapshot:
1512 return &bpf_get_branch_snapshot_proto;
7c7e3d31
SL
1513 case BPF_FUNC_find_vma:
1514 return &bpf_find_vma_proto;
10aceb62
DM
1515 case BPF_FUNC_trace_vprintk:
1516 return bpf_get_trace_vprintk_proto();
9fd82b61 1517 default:
b00628b1 1518 return bpf_base_func_proto(func_id);
9fd82b61
AS
1519 }
1520}
1521
5e43f899
AI
1522static const struct bpf_func_proto *
1523kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1524{
1525 switch (func_id) {
a43eec30
AS
1526 case BPF_FUNC_perf_event_output:
1527 return &bpf_perf_event_output_proto;
d5a3b1f6
AS
1528 case BPF_FUNC_get_stackid:
1529 return &bpf_get_stackid_proto;
c195651e
YS
1530 case BPF_FUNC_get_stack:
1531 return &bpf_get_stack_proto;
9802d865
JB
1532#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1533 case BPF_FUNC_override_return:
1534 return &bpf_override_return_proto;
1535#endif
9ffd9f3f 1536 case BPF_FUNC_get_func_ip:
42a57120
JO
1537 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1538 &bpf_get_func_ip_proto_kprobe_multi :
1539 &bpf_get_func_ip_proto_kprobe;
7adfc6c9 1540 case BPF_FUNC_get_attach_cookie:
ca74823c
JO
1541 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1542 &bpf_get_attach_cookie_proto_kmulti :
1543 &bpf_get_attach_cookie_proto_trace;
2541517c 1544 default:
fc611f47 1545 return bpf_tracing_func_proto(func_id, prog);
2541517c
AS
1546 }
1547}
1548
1549/* bpf+kprobe programs can access fields of 'struct pt_regs' */
19de99f7 1550static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1551 const struct bpf_prog *prog,
23994631 1552 struct bpf_insn_access_aux *info)
2541517c 1553{
2541517c
AS
1554 if (off < 0 || off >= sizeof(struct pt_regs))
1555 return false;
2541517c
AS
1556 if (type != BPF_READ)
1557 return false;
2541517c
AS
1558 if (off % size != 0)
1559 return false;
2d071c64
DB
1560 /*
1561 * Assertion for 32 bit to make sure last 8 byte access
1562 * (BPF_DW) to the last 4 byte member is disallowed.
1563 */
1564 if (off + size > sizeof(struct pt_regs))
1565 return false;
1566
2541517c
AS
1567 return true;
1568}
1569
7de16e3a 1570const struct bpf_verifier_ops kprobe_verifier_ops = {
2541517c
AS
1571 .get_func_proto = kprobe_prog_func_proto,
1572 .is_valid_access = kprobe_prog_is_valid_access,
1573};
1574
7de16e3a
JK
1575const struct bpf_prog_ops kprobe_prog_ops = {
1576};
1577
f3694e00
DB
1578BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1579 u64, flags, void *, data, u64, size)
9940d67c 1580{
f3694e00
DB
1581 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1582
9940d67c
AS
1583 /*
1584 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1585 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
f3694e00 1586 * from there and call the same bpf_perf_event_output() helper inline.
9940d67c 1587 */
f3694e00 1588 return ____bpf_perf_event_output(regs, map, flags, data, size);
9940d67c
AS
1589}
1590
1591static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1592 .func = bpf_perf_event_output_tp,
1593 .gpl_only = true,
1594 .ret_type = RET_INTEGER,
1595 .arg1_type = ARG_PTR_TO_CTX,
1596 .arg2_type = ARG_CONST_MAP_PTR,
1597 .arg3_type = ARG_ANYTHING,
216e3cd2 1598 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
a60dd35d 1599 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
9940d67c
AS
1600};
1601
f3694e00
DB
1602BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1603 u64, flags)
9940d67c 1604{
f3694e00 1605 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
9940d67c 1606
f3694e00
DB
1607 /*
1608 * Same comment as in bpf_perf_event_output_tp(), only that this time
1609 * the other helper's function body cannot be inlined due to being
1610 * external, thus we need to call raw helper function.
1611 */
1612 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1613 flags, 0, 0);
9940d67c
AS
1614}
1615
1616static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1617 .func = bpf_get_stackid_tp,
1618 .gpl_only = true,
1619 .ret_type = RET_INTEGER,
1620 .arg1_type = ARG_PTR_TO_CTX,
1621 .arg2_type = ARG_CONST_MAP_PTR,
1622 .arg3_type = ARG_ANYTHING,
1623};
1624
c195651e
YS
1625BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1626 u64, flags)
1627{
1628 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1629
1630 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1631 (unsigned long) size, flags, 0);
1632}
1633
1634static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1635 .func = bpf_get_stack_tp,
1636 .gpl_only = true,
1637 .ret_type = RET_INTEGER,
1638 .arg1_type = ARG_PTR_TO_CTX,
1639 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1640 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1641 .arg4_type = ARG_ANYTHING,
1642};
1643
5e43f899
AI
1644static const struct bpf_func_proto *
1645tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
f005afed
YS
1646{
1647 switch (func_id) {
1648 case BPF_FUNC_perf_event_output:
1649 return &bpf_perf_event_output_proto_tp;
1650 case BPF_FUNC_get_stackid:
1651 return &bpf_get_stackid_proto_tp;
c195651e
YS
1652 case BPF_FUNC_get_stack:
1653 return &bpf_get_stack_proto_tp;
7adfc6c9
AN
1654 case BPF_FUNC_get_attach_cookie:
1655 return &bpf_get_attach_cookie_proto_trace;
f005afed 1656 default:
fc611f47 1657 return bpf_tracing_func_proto(func_id, prog);
f005afed
YS
1658 }
1659}
1660
1661static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1662 const struct bpf_prog *prog,
f005afed
YS
1663 struct bpf_insn_access_aux *info)
1664{
1665 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1666 return false;
1667 if (type != BPF_READ)
1668 return false;
1669 if (off % size != 0)
1670 return false;
1671
1672 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1673 return true;
1674}
1675
1676const struct bpf_verifier_ops tracepoint_verifier_ops = {
1677 .get_func_proto = tp_prog_func_proto,
1678 .is_valid_access = tp_prog_is_valid_access,
1679};
1680
1681const struct bpf_prog_ops tracepoint_prog_ops = {
1682};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
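
/*
 * Illustrative only (added comment): from a perf_event BPF program, the
 * helper above is typically used in two steps, first querying the required
 * size, then copying the records; "entries" is an assumed buffer sized by
 * the program author:
 *
 *	int n = bpf_read_branch_records(ctx, NULL, 0,
 *					BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (n > 0 && n <= sizeof(entries))
 *		n = bpf_read_branch_records(ctx, entries, n, 0);
 */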

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid a potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run regardless of bpf_prog_active, support
 * concurrent usage in normal, irq and NMI contexts.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}
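
/*
 * Usage pattern (added comment, mirrors the callers below): every user must
 * pair the two helpers and handle the nesting-overflow error:
 *
 *	struct pt_regs *regs = get_bpf_raw_tp_regs();
 *
 *	if (IS_ERR(regs))
 *		return PTR_ERR(regs);
 *	perf_fetch_caller_regs(regs);
 *	...
 *	put_bpf_raw_tp_regs();
 */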

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_skc_to_mptcp_sock:
		return &bpf_skc_to_mptcp_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		/*
		 * Permit a misaligned 8-byte read only on 32-bit archs,
		 * where the second half of a u64 field sits at a
		 * 4-byte-aligned offset.
		 */
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
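
/*
 * For illustration (added comment): a program load of ctx->sample_period is
 * rewritten by the function above into two loads, conceptually:
 *
 *	dst = ((struct bpf_perf_event_data_kern *)ctx)->data;
 *	dst = ((struct perf_sample_data *)dst)->period;
 *
 * i.e. the user-visible struct bpf_perf_event_data is never materialized;
 * accesses are remapped onto the kernel-internal layout at verification time.
 */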

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if the function is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	/* Recursion guard: at most one run of @prog per CPU at a time. */
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
out:
	this_cpu_dec(*(prog->active));
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
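
/*
 * For reference (added comment): BPF_TRACE_DEFN_x(2) above expands to
 * roughly the following, and analogously for the other arities:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */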

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that the program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

struct user_syms {
	const char **syms;
	char *buf;
};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	if (err) {
		kvfree(syms);
		kvfree(buf);
	}
	return err;
}
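
/*
 * Illustrative layout (added comment): "usyms" is a user-space array of
 * pointers, each pointing at a NUL-terminated symbol name, e.g.
 *
 *	usyms -> [ p0, p1, p2 ]
 *	           p0 -> "do_sys_open\0"
 *	           p1 -> "vfs_read\0"
 *	           p2 -> "vfs_write\0"
 *
 * copy_user_syms() flattens the strings into one kernel buffer (us->buf)
 * and fills us->syms with pointers into it.
 */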

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}

static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}
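
/*
 * Illustrative invariant (added comment): after attach, link->addrs is
 * sorted and link->cookies mirrors it index for index, e.g.
 *
 *	link->addrs:   [ 0xffff..a0, 0xffff..b8, 0xffff..f0 ]
 *	link->cookies: [ 7,          42,         13         ]
 *
 * so the bsearch() on addrs above directly yields the slot whose cookie
 * belongs to the probed function.
 */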

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}

static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

 out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static void
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If provided, also swap the related cookies. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}

struct module_addr_args {
	unsigned long *addrs;
	u32 addrs_cnt;
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int module_callback(void *data, const char *name,
			   struct module *mod, unsigned long addr)
{
	struct module_addr_args *args = data;
	struct module **mods;

	/* We iterate over all module symbols and for each we:
	 * - search for it in the provided addresses array
	 * - if found, check whether we already stored this module pointer
	 *   (modules are iterated sequentially, so checking just the last
	 *   stored pointer is enough)
	 * - take a module reference and store the pointer
	 */
	if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr),
		     bpf_kprobe_multi_addrs_cmp))
		return 0;

	if (args->mods && args->mods[args->mods_cnt - 1] == mod)
		return 0;

	if (args->mods_cnt == args->mods_cap) {
		args->mods_cap = max(16, args->mods_cap * 3 / 2);
		mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		args->mods = mods;
	}

	if (!try_module_get(mod))
		return -EINVAL;

	args->mods[args->mods_cnt] = mod;
	args->mods_cnt++;
	return 0;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct module_addr_args args = {
		.addrs     = addrs,
		.addrs_cnt = addrs_cnt,
	};
	int err;

	/* We return either err < 0 in case of error, ... */
	err = module_kallsyms_on_each_symbol(module_callback, &args);
	if (err) {
		kprobe_multi_put_modules(args.mods, args.mods_cnt);
		kfree(args.mods);
		return err;
	}

	/* or number of modules found if everything is ok. */
	*mods = args.mods;
	return args.mods_cnt;
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting of the cookies as
		 * well (see bpf_kprobe_multi_cookie_swap). This way we can
		 * find a cookie based on the address in the
		 * bpf_get_attach_cookie helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	} else {
		/*
		 * We need to sort the addrs array even if there are no
		 * cookies provided, to allow the bsearch in
		 * get_modules_for_addrs.
		 */
		sort(addrs, cnt, sizeof(*addrs),
		     bpf_kprobe_multi_addrs_cmp, NULL);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
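
/*
 * Illustrative only (added comment): from user space this attach path is
 * typically reached via libbpf, roughly as sketched below ("prog" being a
 * loaded SEC("kprobe.multi/...") program; exact option names may vary by
 * libbpf version):
 *
 *	const char *syms[] = { "vfs_read", "vfs_write" };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		    .syms = syms,
 *		    .cnt = 2);
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 */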
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif