// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some BPF program is already running on this CPU,
		 * don't call into another BPF program (same or different)
		 * and don't send a kprobe event into the ring buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

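/*
 * Illustrative sketch only (not part of this file): a minimal user-side
 * kprobe program, as it might be written with libbpf, showing the 0/1
 * return contract documented above. The attach point is hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_opens(struct pt_regs *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		// 0: filter the event out; 1: store it in the ring buffer.
 *		return tgid == 1 ? 0 : 1;
 *	}
 */
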
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

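/*
 * Illustrative sketch only (hypothetical user-side program; map, section
 * and function names are made up): the NUL handling described above
 * matters when the copied string is used directly as a hash map key,
 * since junk past the NUL would split identical strings across entries.
 *
 *	struct path_key { char name[64]; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, struct path_key);
 *		__type(value, u64);
 *	} hits SEC(".maps");
 *
 *	SEC("kprobe/...")	// attach point omitted
 *	int count_paths(struct pt_regs *ctx)
 *	{
 *		struct path_key key = {};
 *		u64 one = 1;
 *
 *		bpf_probe_read_user_str(key.name, sizeof(key.name),
 *					(void *)PT_REGS_PARM1(ctx));
 *		bpf_map_update_elem(&hits, &key, &one, BPF_ANY);
 *		return 0;
 *	}
 */
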
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output()
	 * et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context, which is safe for the helper to
	 * run in. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}

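/*
 * Illustrative sketch only: what a program calling this helper typically
 * looks like on the user side. libbpf's bpf_printk() wraps
 * bpf_trace_printk(), and the output appears in
 * /sys/kernel/debug/tracing/trace_pipe once the event above is enabled.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int log_execve(void *ctx)
 *	{
 *		bpf_printk("execve by pid %d",
 *			   bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 */
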
BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

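/*
 * Illustrative sketch only: a typical user-side pairing for the helper
 * above, pushing a fixed-size sample into a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * on the current CPU (map, struct, and attach point are hypothetical):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	struct event { u32 pid; u64 ts; };
 *
 *	SEC("kprobe/...")	// attach point omitted
 *	int emit(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.ts  = bpf_ktime_get_ns(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */
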
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(current)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending the
		 * signal, to the irq_work. The current task may change when
		 * queued irq works get executed.
		 */
		work->task = get_task_struct(current);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to work around
	 * a potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

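/*
 * Illustrative sketch only: bpf_d_path() used from one of the allowlisted
 * hooks above, e.g. an fentry program on vfs_truncate (the buffer size is
 * arbitrary):
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(on_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *		long len = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *
 *		if (len > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */
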
#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* Being extra safe in here in case entry ip is on the page-edge. */
	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
		return fentry_ip;
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

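/*
 * Illustrative sketch only: how the three helpers above fit together from
 * an fexit program (attach target hypothetical). As the implementations
 * show, the tracing-program context is an array of u64 slots: arguments
 * in ctx[0..nr_args-1], the return value in ctx[nr_args], nr_args itself
 * in ctx[-1], and the traced function's ip in ctx[-2], which is why the
 * helpers index backwards.
 *
 *	SEC("fexit/kernel_clone")
 *	int dump_args(void *ctx)
 *	{
 *		u64 cnt = bpf_get_func_arg_cnt(ctx);
 *		u64 arg0 = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);
 *		bpf_get_func_ret(ctx, &ret);
 *		bpf_printk("%llu args, arg0=%llx ret=%llx", cnt, arg0, ret);
 *		return 0;
 *	}
 */
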
#ifdef CONFIG_KEYS
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "kfuncs which will be used in BPF programs");

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
					   struct bpf_dynptr_kern *sig_ptr,
					   struct bpf_key *trusted_keyring)
{
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	return verify_pkcs7_signature(data_ptr->data,
				      __bpf_dynptr_size(data_ptr),
				      sig_ptr->data,
				      __bpf_dynptr_size(sig_ptr),
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__diag_pop();

BTF_SET8_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_SET8_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
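
/*
 * Illustrative sketch only: the intended acquire/use/release pattern for
 * the key kfuncs above, from a sleepable LSM program. The key serial is
 * hypothetical and the dynptr setup for data/sig is omitted.
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check_sig, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_key *tk = bpf_lookup_user_key(0x1234, 0);
 *		int ret;
 *
 *		if (!tk)
 *			return -ENOENT;
 *		ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, tk);
 *		bpf_key_put(tk);
 *		return ret;
 *	}
 */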
1402
7adfc6c9 1403static const struct bpf_func_proto *
fc611f47 1404bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2541517c
AS
1405{
1406 switch (func_id) {
1407 case BPF_FUNC_map_lookup_elem:
1408 return &bpf_map_lookup_elem_proto;
1409 case BPF_FUNC_map_update_elem:
1410 return &bpf_map_update_elem_proto;
1411 case BPF_FUNC_map_delete_elem:
1412 return &bpf_map_delete_elem_proto;
02a8c817
AC
1413 case BPF_FUNC_map_push_elem:
1414 return &bpf_map_push_elem_proto;
1415 case BPF_FUNC_map_pop_elem:
1416 return &bpf_map_pop_elem_proto;
1417 case BPF_FUNC_map_peek_elem:
1418 return &bpf_map_peek_elem_proto;
07343110
FZ
1419 case BPF_FUNC_map_lookup_percpu_elem:
1420 return &bpf_map_lookup_percpu_elem_proto;
d9847d31
AS
1421 case BPF_FUNC_ktime_get_ns:
1422 return &bpf_ktime_get_ns_proto;
71d19214
MÅ»
1423 case BPF_FUNC_ktime_get_boot_ns:
1424 return &bpf_ktime_get_boot_ns_proto;
04fd61ab
AS
1425 case BPF_FUNC_tail_call:
1426 return &bpf_tail_call_proto;
ffeedafb
AS
1427 case BPF_FUNC_get_current_pid_tgid:
1428 return &bpf_get_current_pid_tgid_proto;
606274c5
AS
1429 case BPF_FUNC_get_current_task:
1430 return &bpf_get_current_task_proto;
3ca1032a
KS
1431 case BPF_FUNC_get_current_task_btf:
1432 return &bpf_get_current_task_btf_proto;
dd6e10fb
DX
1433 case BPF_FUNC_task_pt_regs:
1434 return &bpf_task_pt_regs_proto;
ffeedafb
AS
1435 case BPF_FUNC_get_current_uid_gid:
1436 return &bpf_get_current_uid_gid_proto;
1437 case BPF_FUNC_get_current_comm:
1438 return &bpf_get_current_comm_proto;
9c959c86 1439 case BPF_FUNC_trace_printk:
0756ea3e 1440 return bpf_get_trace_printk_proto();
ab1973d3
AS
1441 case BPF_FUNC_get_smp_processor_id:
1442 return &bpf_get_smp_processor_id_proto;
2d0e30c3
DB
1443 case BPF_FUNC_get_numa_node_id:
1444 return &bpf_get_numa_node_id_proto;
35578d79
KX
1445 case BPF_FUNC_perf_event_read:
1446 return &bpf_perf_event_read_proto;
60d20f91
SD
1447 case BPF_FUNC_current_task_under_cgroup:
1448 return &bpf_current_task_under_cgroup_proto;
8937bd80
AS
1449 case BPF_FUNC_get_prandom_u32:
1450 return &bpf_get_prandom_u32_proto;
51e1bb9e
DB
1451 case BPF_FUNC_probe_write_user:
1452 return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1453 NULL : bpf_get_probe_write_proto();
6ae08ae3
DB
1454 case BPF_FUNC_probe_read_user:
1455 return &bpf_probe_read_user_proto;
1456 case BPF_FUNC_probe_read_kernel:
71330842 1457 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1458 NULL : &bpf_probe_read_kernel_proto;
6ae08ae3
DB
1459 case BPF_FUNC_probe_read_user_str:
1460 return &bpf_probe_read_user_str_proto;
1461 case BPF_FUNC_probe_read_kernel_str:
71330842 1462 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1463 NULL : &bpf_probe_read_kernel_str_proto;
0ebeea8c
DB
1464#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1465 case BPF_FUNC_probe_read:
71330842 1466 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1467 NULL : &bpf_probe_read_compat_proto;
a5e8c070 1468 case BPF_FUNC_probe_read_str:
71330842 1469 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
ff40e510 1470 NULL : &bpf_probe_read_compat_str_proto;
0ebeea8c 1471#endif
34ea38ca 1472#ifdef CONFIG_CGROUPS
c4bcfb38
YS
1473 case BPF_FUNC_cgrp_storage_get:
1474 return &bpf_cgrp_storage_get_proto;
1475 case BPF_FUNC_cgrp_storage_delete:
1476 return &bpf_cgrp_storage_delete_proto;
34ea38ca 1477#endif
8b401f9e
YS
1478 case BPF_FUNC_send_signal:
1479 return &bpf_send_signal_proto;
8482941f
YS
1480 case BPF_FUNC_send_signal_thread:
1481 return &bpf_send_signal_thread_proto;
b80b033b
SL
1482 case BPF_FUNC_perf_event_read_value:
1483 return &bpf_perf_event_read_value_proto;
b4490c5c
CN
1484 case BPF_FUNC_get_ns_current_pid_tgid:
1485 return &bpf_get_ns_current_pid_tgid_proto;
457f4436
AN
1486 case BPF_FUNC_ringbuf_output:
1487 return &bpf_ringbuf_output_proto;
1488 case BPF_FUNC_ringbuf_reserve:
1489 return &bpf_ringbuf_reserve_proto;
1490 case BPF_FUNC_ringbuf_submit:
1491 return &bpf_ringbuf_submit_proto;
1492 case BPF_FUNC_ringbuf_discard:
1493 return &bpf_ringbuf_discard_proto;
1494 case BPF_FUNC_ringbuf_query:
1495 return &bpf_ringbuf_query_proto;
72e2b2b6
YS
1496 case BPF_FUNC_jiffies64:
1497 return &bpf_jiffies64_proto;
fa28dcb8
SL
1498 case BPF_FUNC_get_task_stack:
1499 return &bpf_get_task_stack_proto;
07be4c4a 1500 case BPF_FUNC_copy_from_user:
01685c5b 1501 return &bpf_copy_from_user_proto;
376040e4 1502 case BPF_FUNC_copy_from_user_task:
01685c5b 1503 return &bpf_copy_from_user_task_proto;
c4d0bfb4
AM
1504 case BPF_FUNC_snprintf_btf:
1505 return &bpf_snprintf_btf_proto;
b7906b70 1506 case BPF_FUNC_per_cpu_ptr:
eaa6bcb7 1507 return &bpf_per_cpu_ptr_proto;
b7906b70 1508 case BPF_FUNC_this_cpu_ptr:
63d9b80d 1509 return &bpf_this_cpu_ptr_proto;
a10787e6 1510 case BPF_FUNC_task_storage_get:
4279adb0
MKL
1511 if (bpf_prog_check_recur(prog))
1512 return &bpf_task_storage_get_recur_proto;
a10787e6
SL
1513 return &bpf_task_storage_get_proto;
1514 case BPF_FUNC_task_storage_delete:
8a7dac37
MKL
1515 if (bpf_prog_check_recur(prog))
1516 return &bpf_task_storage_delete_recur_proto;
a10787e6 1517 return &bpf_task_storage_delete_proto;
69c087ba
YS
1518 case BPF_FUNC_for_each_map_elem:
1519 return &bpf_for_each_map_elem_proto;
7b15523a
FR
1520 case BPF_FUNC_snprintf:
1521 return &bpf_snprintf_proto;
9b99edca
JO
1522 case BPF_FUNC_get_func_ip:
1523 return &bpf_get_func_ip_proto_tracing;
856c02db
SL
1524 case BPF_FUNC_get_branch_snapshot:
1525 return &bpf_get_branch_snapshot_proto;
7c7e3d31
SL
1526 case BPF_FUNC_find_vma:
1527 return &bpf_find_vma_proto;
10aceb62
DM
1528 case BPF_FUNC_trace_vprintk:
1529 return bpf_get_trace_vprintk_proto();
9fd82b61 1530 default:
b00628b1 1531 return bpf_base_func_proto(func_id);
9fd82b61
AS
1532 }
1533}
1534
5e43f899
AI
1535static const struct bpf_func_proto *
1536kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
9fd82b61
AS
1537{
1538 switch (func_id) {
a43eec30
AS
1539 case BPF_FUNC_perf_event_output:
1540 return &bpf_perf_event_output_proto;
d5a3b1f6
AS
1541 case BPF_FUNC_get_stackid:
1542 return &bpf_get_stackid_proto;
c195651e
YS
1543 case BPF_FUNC_get_stack:
1544 return &bpf_get_stack_proto;
9802d865
JB
1545#ifdef CONFIG_BPF_KPROBE_OVERRIDE
1546 case BPF_FUNC_override_return:
1547 return &bpf_override_return_proto;
1548#endif
9ffd9f3f 1549 case BPF_FUNC_get_func_ip:
42a57120
JO
1550 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1551 &bpf_get_func_ip_proto_kprobe_multi :
1552 &bpf_get_func_ip_proto_kprobe;
7adfc6c9 1553 case BPF_FUNC_get_attach_cookie:
ca74823c
JO
1554 return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
1555 &bpf_get_attach_cookie_proto_kmulti :
1556 &bpf_get_attach_cookie_proto_trace;
2541517c 1557 default:
fc611f47 1558 return bpf_tracing_func_proto(func_id, prog);
2541517c
AS
1559 }
1560}
1561
1562/* bpf+kprobe programs can access fields of 'struct pt_regs' */
19de99f7 1563static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1564 const struct bpf_prog *prog,
23994631 1565 struct bpf_insn_access_aux *info)
2541517c 1566{
2541517c
AS
1567 if (off < 0 || off >= sizeof(struct pt_regs))
1568 return false;
2541517c
AS
1569 if (type != BPF_READ)
1570 return false;
2541517c
AS
1571 if (off % size != 0)
1572 return false;
2d071c64
DB
1573 /*
1574 * Assertion for 32 bit to make sure last 8 byte access
1575 * (BPF_DW) to the last 4 byte member is disallowed.
1576 */
1577 if (off + size > sizeof(struct pt_regs))
1578 return false;
1579
2541517c
AS
1580 return true;
1581}
1582
7de16e3a 1583const struct bpf_verifier_ops kprobe_verifier_ops = {
2541517c
AS
1584 .get_func_proto = kprobe_prog_func_proto,
1585 .is_valid_access = kprobe_prog_is_valid_access,
1586};
1587
7de16e3a
JK
1588const struct bpf_prog_ops kprobe_prog_ops = {
1589};
1590
f3694e00
DB
1591BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1592 u64, flags, void *, data, u64, size)
9940d67c 1593{
f3694e00
DB
1594 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1595
9940d67c
AS
1596 /*
1597 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1598 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
f3694e00 1599 * from there and call the same bpf_perf_event_output() helper inline.
9940d67c 1600 */
f3694e00 1601 return ____bpf_perf_event_output(regs, map, flags, data, size);
9940d67c
AS
1602}
1603
1604static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1605 .func = bpf_perf_event_output_tp,
1606 .gpl_only = true,
1607 .ret_type = RET_INTEGER,
1608 .arg1_type = ARG_PTR_TO_CTX,
1609 .arg2_type = ARG_CONST_MAP_PTR,
1610 .arg3_type = ARG_ANYTHING,
216e3cd2 1611 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
a60dd35d 1612 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
9940d67c
AS
1613};
1614
f3694e00
DB
1615BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1616 u64, flags)
9940d67c 1617{
f3694e00 1618 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
9940d67c 1619
f3694e00
DB
1620 /*
1621 * Same comment as in bpf_perf_event_output_tp(), only that this time
1622 * the other helper's function body cannot be inlined due to being
1623 * external, thus we need to call raw helper function.
1624 */
1625 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1626 flags, 0, 0);
9940d67c
AS
1627}
1628
1629static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1630 .func = bpf_get_stackid_tp,
1631 .gpl_only = true,
1632 .ret_type = RET_INTEGER,
1633 .arg1_type = ARG_PTR_TO_CTX,
1634 .arg2_type = ARG_CONST_MAP_PTR,
1635 .arg3_type = ARG_ANYTHING,
1636};
1637
c195651e
YS
1638BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1639 u64, flags)
1640{
1641 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1642
1643 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1644 (unsigned long) size, flags, 0);
1645}
1646
1647static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1648 .func = bpf_get_stack_tp,
1649 .gpl_only = true,
1650 .ret_type = RET_INTEGER,
1651 .arg1_type = ARG_PTR_TO_CTX,
1652 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1653 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1654 .arg4_type = ARG_ANYTHING,
1655};
1656
5e43f899
AI
1657static const struct bpf_func_proto *
1658tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
f005afed
YS
1659{
1660 switch (func_id) {
1661 case BPF_FUNC_perf_event_output:
1662 return &bpf_perf_event_output_proto_tp;
1663 case BPF_FUNC_get_stackid:
1664 return &bpf_get_stackid_proto_tp;
c195651e
YS
1665 case BPF_FUNC_get_stack:
1666 return &bpf_get_stack_proto_tp;
7adfc6c9
AN
1667 case BPF_FUNC_get_attach_cookie:
1668 return &bpf_get_attach_cookie_proto_trace;
f005afed 1669 default:
fc611f47 1670 return bpf_tracing_func_proto(func_id, prog);
f005afed
YS
1671 }
1672}
1673
1674static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1675 const struct bpf_prog *prog,
f005afed
YS
1676 struct bpf_insn_access_aux *info)
1677{
1678 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1679 return false;
1680 if (type != BPF_READ)
1681 return false;
1682 if (off % size != 0)
1683 return false;
1684
1685 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1686 return true;
1687}
1688
1689const struct bpf_verifier_ops tracepoint_verifier_ops = {
1690 .get_func_proto = tp_prog_func_proto,
1691 .is_valid_access = tp_prog_is_valid_access,
1692};
1693
1694const struct bpf_prog_ops tracepoint_prog_ops = {
1695};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
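
/*
 * Usage sketch (illustrative assumption, not part of this file): a
 * perf_event BPF program would typically query the record size with
 * BPF_F_GET_BRANCH_RECORDS_SIZE first, then read into a fixed buffer.
 * The program and buffer names are placeholders.
 *
 *	SEC("perf_event")
 *	int lbr_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry entries[16];
 *		long sz;
 *
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		if (sz <= 0)
 *			return 0;
 *		sz = bpf_read_branch_records(ctx, entries,
 *					     sizeof(entries), 0);
 *		return 0;
 *	}
 *
 * Both calls resolve to bpf_read_branch_records() above; the second
 * returns the number of bytes actually copied.
 */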

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;
extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_skc_to_unix_sock:
		return &bpf_skc_to_unix_sock_proto;
	case BPF_FUNC_skc_to_mptcp_sock:
		return &bpf_skc_to_mptcp_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
	case BPF_FUNC_xdp_get_buff_len:
		return &bpf_xdp_get_buff_len_trace_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	case BPF_FUNC_get_func_arg:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
	case BPF_FUNC_get_func_ret:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
	case BPF_FUNC_get_func_arg_cnt:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
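
/*
 * Illustration (hand-derived expansion, not emitted anywhere verbatim):
 * for a program reading ctx->sample_period, the two instructions built
 * above behave like
 *
 *	r1 = *(struct perf_sample_data **)(ctx +
 *		offsetof(struct bpf_perf_event_data_kern, data));
 *	r1 = *(u64 *)(r1 + offsetof(struct perf_sample_data, period));
 *
 * i.e. one context load is rewritten into a pointer chase through the
 * kernel-side layout, so the BPF program never sees
 * struct bpf_perf_event_data_kern directly.
 */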

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto     = pe_prog_func_proto,
	.is_valid_access    = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the kprobe is on the function
	 * entry, and only if it is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
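
/*
 * Userspace side sketch (assumed usage, for illustration): the query
 * above is reached through the PERF_EVENT_IOC_QUERY_BPF ioctl on a
 * tracepoint perf event fd. "perf_fd" is a placeholder for an fd
 * obtained via perf_event_open().
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + sizeof(__u32) * 64);
 *	query->ids_len = 64;
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		printf("%u progs attached\n", query->prog_cnt);
 *
 * With ids_len = 0 the call only fills in prog_cnt, matching the
 * ZERO_SIZE_PTR note above.
 */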

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		goto out;
	}
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
out:
	this_cpu_dec(*(prog->active));
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
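
/*
 * Expansion example, derived by hand from the macros above:
 * BPF_TRACE_DEFN_x(2) generates
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * i.e. REPEAT(x, SARG, __DL_COM, ...) emits the comma-separated
 * parameter list and REPEAT(x, COPY, __DL_SEM, ...) the
 * semicolon-separated copies into the args array.
 */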

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */

#ifdef CONFIG_FPROBE
struct bpf_kprobe_multi_link {
	struct bpf_link link;
	struct fprobe fp;
	unsigned long *addrs;
	u64 *cookies;
	u32 cnt;
	u32 mods_cnt;
	struct module **mods;
};

struct bpf_kprobe_multi_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_kprobe_multi_link *link;
	unsigned long entry_ip;
};

struct user_syms {
	const char **syms;
	char *buf;
};

static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
{
	unsigned long __user usymbol;
	const char **syms = NULL;
	char *buf = NULL, *p;
	int err = -ENOMEM;
	unsigned int i;

	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
		if (err == KSYM_NAME_LEN)
			err = -E2BIG;
		if (err < 0)
			goto error;
		syms[i] = p;
		p += err + 1;
	}

	us->syms = syms;
	us->buf = buf;
	return 0;

error:
	if (err) {
		kvfree(syms);
		kvfree(buf);
	}
	return err;
}

static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
{
	u32 i;

	for (i = 0; i < cnt; i++)
		module_put(mods[i]);
}

static void free_user_syms(struct user_syms *us)
{
	kvfree(us->syms);
	kvfree(us->buf);
}

static void bpf_kprobe_multi_link_release(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
}

static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
{
	struct bpf_kprobe_multi_link *kmulti_link;

	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
}

static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
	.release = bpf_kprobe_multi_link_release,
	.dealloc = bpf_kprobe_multi_link_dealloc,
};

static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
{
	const struct bpf_kprobe_multi_link *link = priv;
	unsigned long *addr_a = a, *addr_b = b;
	u64 *cookie_a, *cookie_b;

	cookie_a = link->cookies + (addr_a - link->addrs);
	cookie_b = link->cookies + (addr_b - link->addrs);

	/* swap addr_a/addr_b and cookie_a/cookie_b values */
	swap(*addr_a, *addr_b);
	swap(*cookie_a, *cookie_b);
}

static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
{
	const unsigned long *addr_a = a, *addr_b = b;

	if (*addr_a == *addr_b)
		return 0;
	return *addr_a < *addr_b ? -1 : 1;
}

static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
{
	return bpf_kprobe_multi_addrs_cmp(a, b);
}

static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;
	struct bpf_kprobe_multi_link *link;
	u64 *cookie, entry_ip;
	unsigned long *addr;

	if (WARN_ON_ONCE(!ctx))
		return 0;
	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	link = run_ctx->link;
	if (!link->cookies)
		return 0;
	entry_ip = run_ctx->entry_ip;
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
	if (!addr)
		return 0;
	cookie = link->cookies + (addr - link->addrs);
	return *cookie;
}

static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_kprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}
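
/*
 * Consumer-side sketch (illustrative, not part of this file): inside a
 * kprobe.multi program, bpf_get_attach_cookie() resolves through
 * bpf_kprobe_multi_cookie() above, keyed by the sorted entry IP, and
 * bpf_get_func_ip() through bpf_kprobe_multi_entry_ip(). The "tcp_*"
 * attach pattern below is an assumption for the example.
 *
 *	SEC("kprobe.multi/tcp_*")
 *	int trace_entry(struct pt_regs *ctx)
 *	{
 *		u64 cookie = bpf_get_attach_cookie(ctx);
 *		u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("func ip %llx cookie %llu", ip, cookie);
 *		return 0;
 *	}
 */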

static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
			   unsigned long entry_ip, struct pt_regs *regs)
{
	struct bpf_kprobe_multi_run_ctx run_ctx = {
		.link = link,
		.entry_ip = entry_ip,
	};
	struct bpf_run_ctx *old_run_ctx;
	int err;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		err = 0;
		goto out;
	}

	migrate_disable();
	rcu_read_lock();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();

 out:
	__this_cpu_dec(bpf_prog_active);
	return err;
}

static int
kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  unsigned long ret_ip, struct pt_regs *regs,
			  void *data)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
	return 0;
}

static void
kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
			       unsigned long ret_ip, struct pt_regs *regs,
			       void *data)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

struct multi_symbols_sort {
	const char **funcs;
	u64 *cookies;
};

static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
	const struct multi_symbols_sort *data = priv;
	const char **name_a = a, **name_b = b;

	swap(*name_a, *name_b);

	/* If cookies were provided, swap the related cookies as well. */
	if (data->cookies) {
		u64 *cookie_a, *cookie_b;

		cookie_a = data->cookies + (name_a - data->funcs);
		cookie_b = data->cookies + (name_b - data->funcs);
		swap(*cookie_a, *cookie_b);
	}
}

struct modules_array {
	struct module **mods;
	int mods_cnt;
	int mods_cap;
};

static int add_module(struct modules_array *arr, struct module *mod)
{
	struct module **mods;

	if (arr->mods_cnt == arr->mods_cap) {
		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
		if (!mods)
			return -ENOMEM;
		arr->mods = mods;
	}

	arr->mods[arr->mods_cnt] = mod;
	arr->mods_cnt++;
	return 0;
}

static bool has_module(struct modules_array *arr, struct module *mod)
{
	int i;

	for (i = arr->mods_cnt - 1; i >= 0; i--) {
		if (arr->mods[i] == mod)
			return true;
	}
	return false;
}

static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
{
	struct modules_array arr = {};
	u32 i, err = 0;

	for (i = 0; i < addrs_cnt; i++) {
		struct module *mod;

		preempt_disable();
		mod = __module_address(addrs[i]);
		/* Either no module, or it is already stored. */
		if (!mod || has_module(&arr, mod)) {
			preempt_enable();
			continue;
		}
		if (!try_module_get(mod))
			err = -EINVAL;
		preempt_enable();
		if (err)
			break;
		err = add_module(&arr, mod);
		if (err) {
			module_put(mod);
			break;
		}
	}

	/* We return either err < 0 in case of error, ... */
	if (err) {
		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
		kfree(arr.mods);
		return err;
	}

	/* ... or the number of modules found if everything is ok. */
	*mods = arr.mods;
	return arr.mods_cnt;
}

int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_kprobe_multi_link *link = NULL;
	struct bpf_link_primer link_primer;
	void __user *ucookies;
	unsigned long *addrs;
	u32 flags, cnt, size;
	void __user *uaddrs;
	u64 *cookies = NULL;
	void __user *usyms;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.kprobe_multi.flags;
	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
		return -EINVAL;

	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
	if (!!uaddrs == !!usyms)
		return -EINVAL;

	cnt = attr->link_create.kprobe_multi.cnt;
	if (!cnt)
		return -EINVAL;

	size = cnt * sizeof(*addrs);
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
	if (ucookies) {
		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
			err = -EFAULT;
			goto error;
		}
	} else {
		struct multi_symbols_sort data = {
			.cookies = cookies,
		};
		struct user_syms us;

		err = copy_user_syms(&us, usyms, cnt);
		if (err)
			goto error;

		if (cookies)
			data.funcs = us.syms;

		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
		if (err)
			goto error;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		err = -ENOMEM;
		goto error;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error;

	if (flags & BPF_F_KPROBE_MULTI_RETURN)
		link->fp.exit_handler = kprobe_multi_link_exit_handler;
	else
		link->fp.entry_handler = kprobe_multi_link_handler;

	link->addrs = addrs;
	link->cookies = cookies;
	link->cnt = cnt;

	if (cookies) {
		/*
		 * Sorting addresses will trigger sorting cookies as well
		 * (check bpf_kprobe_multi_cookie_swap). This way we can
		 * find the cookie based on the address in the
		 * bpf_get_attach_cookie() helper.
		 */
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
	}

	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
		return err;
	}
	link->mods_cnt = err;

	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);

error:
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
	return err;
}
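
/*
 * Userspace attach sketch (assumed libbpf usage; the symbol array and
 * cookie values are placeholders): this function is reached via
 * bpf_link_create() with BPF_TRACE_KPROBE_MULTI, which libbpf wraps as
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		.syms = syms,
 *		.cookies = cookies,
 *		.cnt = cnt,
 *	);
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 *
 * with syms holding cnt symbol names and cookies, optionally, one u64
 * per symbol. Passing a glob pattern instead of explicit syms/addrs is
 * also supported on the libbpf side.
 */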
#else /* !CONFIG_FPROBE */
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif