// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif
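
/*
 * Illustrative sketch (not part of this file): a kprobe program using this
 * helper to fail the probed function without running its body. The target
 * below is hypothetical and must be on the error-injection allowlist:
 *
 *	SEC("kprobe/open_ctree")
 *	int BPF_KPROBE(fail_open_ctree)
 *	{
 *		bpf_override_return(ctx, -EINVAL);
 *		return 0;
 *	}
 */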

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
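
/*
 * Illustrative sketch (not part of this file): reading a NUL-terminated
 * string from user memory inside a BPF program:
 *
 *	char path[64];
 *	long n = bpf_probe_read_user_str(path, sizeof(path), user_ptr);
 *
 *	if (n < 0)
 *		return 0;	// read failed; path[] was zeroed
 *
 * On success `n` counts the bytes copied including the trailing NUL,
 * mirroring the strncpy_from_user_nofault() convention above. `user_ptr`
 * stands for any user-space address the program has obtained.
 */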

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
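
/*
 * Illustrative sketch (not part of this file): the kernel-side twin works
 * the same way, e.g. pulling a short name out of a kernel structure:
 *
 *	char name[32];
 *	long n = bpf_probe_read_kernel_str(name, sizeof(name), kernel_ptr);
 *
 * As the comment above spells out, `name` is cleared only on error; on
 * success the bytes past the NUL keep whatever was in the buffer before.
 * `kernel_ptr` is a placeholder for any kernel address.
 */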

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};
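
/*
 * Illustrative sketch (not part of this file): patching a buffer in the
 * traced task's user memory from a kprobe program; the kernel logs a loud
 * warning when a program using this helper is installed, so treat it as an
 * experimentation tool:
 *
 *	long err = bpf_probe_write_user(user_buf, &replacement,
 *					sizeof(replacement));
 *
 * `user_buf` and `replacement` are placeholders; the destination must be a
 * writable user-space address in the current task.
 */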

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
};
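
/*
 * Illustrative sketch (not part of this file): the classic BPF debugging
 * one-liner, whose output lands on the bpf_trace/bpf_trace_printk tracepoint
 * and hence in /sys/kernel/tracing/trace_pipe:
 *
 *	bpf_printk("open: pid=%d fd=%d", pid, fd);
 *
 * bpf_printk() is the libbpf convenience macro over this helper (or over
 * bpf_trace_vprintk() below when more than three arguments are needed);
 * MAX_TRACE_PRINTK_VARARGS above is where the three-argument limit comes
 * from.
 */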

static void __set_printk_clr_event(struct work_struct *work)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * that calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}
static DECLARE_WORK(set_printk_work, __set_printk_clr_event);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	schedule_work(&set_printk_work);
	return &bpf_trace_printk_proto;
}

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
		.get_buf	= true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func = bpf_trace_vprintk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	schedule_work(&set_printk_work);
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
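
/*
 * Illustrative sketch (not part of this file): bpf_seq_printf() is meant for
 * BPF iterator programs, where the seq_file arrives via the iterator
 * context:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 *
 * BPF_SEQ_PRINTF() is the libbpf macro that packs its varargs into the
 * `args`/`data_len` layout this helper expects.
 */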

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func = bpf_seq_printf_btf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};
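
/*
 * Illustrative sketch (not part of this file): reading a counter for the
 * current CPU from a BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space filled
 * with perf event FDs:
 *
 *	struct bpf_perf_event_value v;
 *
 *	if (!bpf_perf_event_read_value(&events, BPF_F_CURRENT_CPU,
 *				       &v, sizeof(v)))
 *		;	// v.counter, v.enabled and v.running are valid
 *
 * `events` is a placeholder map name; on any failure `v` is zeroed, which
 * is what the clear: path above guarantees.
 */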

const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void)
{
	return &bpf_perf_event_read_value_proto;
}

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw,
			struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_save_raw_data(sd, event, raw);

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);

	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
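
/*
 * Illustrative sketch (not part of this file): streaming a record to user
 * space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY, the standard pattern before
 * BPF ring buffers existed:
 *
 *	struct event { u32 pid; char comm[16]; } e = {};
 *
 *	e.pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_get_current_comm(e.comm, sizeof(e.comm));
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 *
 * `events` is a placeholder map; user space consumes the records with
 * perf_buffer__poll() or an equivalent reader.
 */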

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy	= ctx_copy,
		.size	= ctx_size,
		.data	= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);

	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func = bpf_get_current_task_btf,
	.gpl_only = true,
	.ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func = bpf_task_pt_regs,
	.gpl_only = true,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &bpf_task_pt_regs_ids[0],
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
	bool has_siginfo;
	struct kernel_siginfo info;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;
	struct kernel_siginfo *siginfo;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;

	group_send_sig_info(work->sig, siginfo, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
{
	struct send_signal_irq_work *work = NULL;
	struct kernel_siginfo info;
	struct kernel_siginfo *siginfo;

	if (!task) {
		task = current;
		siginfo = SEND_SIG_PRIV;
	} else {
		clear_siginfo(&info);
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_code = SI_KERNEL;
		info.si_pid = 0;
		info.si_uid = 0;
		info.si_value.sival_ptr = (void *)(unsigned long)value;
		siginfo = &info;
	}

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(task)))
		return -EPERM;

	if (preempt_count() != 0 || irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(task);
		work->has_siginfo = siginfo == &info;
		if (work->has_siginfo)
			copy_siginfo(&work->info, &info);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, siginfo, task, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
}

const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
}

const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
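
/*
 * Illustrative sketch (not part of this file): a tracing program terminating
 * the current task when some program-defined condition trips:
 *
 *	if (suspicious)
 *		bpf_send_signal(SIGKILL);
 *
 * bpf_send_signal() targets the whole thread group (PIDTYPE_TGID), while
 * bpf_send_signal_thread() targets just the calling thread (PIDTYPE_PID);
 * `suspicious` is a placeholder for the program's own logic.
 */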

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to work around
	 * a potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};
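
/*
 * Illustrative sketch (not part of this file): from one of the allowlisted
 * hooks above, e.g. an fentry program on filp_close, resolving the full path
 * of the file being closed:
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(trace_close, struct file *filp)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path(&filp->f_path, buf, sizeof(buf)) > 0)
 *			bpf_printk("close: %s", buf);
 *		return 0;
 *	}
 */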

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};
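
/*
 * Illustrative sketch (not part of this file): pretty-printing a kernel
 * struct into a buffer by its BTF type ID:
 *
 *	struct btf_ptr p = {
 *		.ptr     = skb,
 *		.type_id = bpf_core_type_id_kernel(struct sk_buff),
 *	};
 *	char out[512];
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 *
 * bpf_core_type_id_kernel() is a libbpf CO-RE macro; `skb` stands for a
 * pointer the program got from its context.
 */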

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func = bpf_get_func_ip_tracing,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

static inline unsigned long get_entry_ip(unsigned long fentry_ip)
{
#ifdef CONFIG_X86_KERNEL_IBT
	if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))
		fentry_ip -= ENDBR_INSN_SIZE;
#endif
	return fentry_ip;
}

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func = bpf_get_func_ip_kprobe,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func = bpf_get_func_ip_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func = bpf_get_attach_cookie_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func = bpf_get_func_ip_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func = bpf_get_attach_cookie_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func = bpf_get_attach_cookie_trace,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func = bpf_get_attach_cookie_pe,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func = bpf_get_attach_cookie_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
}

const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func = bpf_get_branch_snapshot,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};
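
/*
 * Illustrative sketch (not part of this file): an fentry program capturing
 * the CPU's last-branch-record (LBR) contents on hardware that supports it:
 *
 *	struct perf_branch_entry entries[16];
 *	long sz = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
 *
 *	if (sz > 0)
 *		;	// sz / sizeof(entries[0]) entries were captured
 *
 * The snapshot is deliberately taken before the flags check above: the LBR
 * keeps recording until it is read, so every extra instruction spent in
 * this helper overwrites useful entries.
 */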

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func = get_func_arg,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg3_size = sizeof(u64),
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func = get_func_ret,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg2_size = sizeof(u64),
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func = get_func_arg_cnt,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
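
/*
 * Illustrative sketch (not part of this file): together these helpers let an
 * fexit program inspect the traced function generically, using the hidden
 * layout described above (arg count at ctx[-1], args at ctx[0..n-1], return
 * value at ctx[nr_args]):
 *
 *	u64 arg0, ret;
 *
 *	if (!bpf_get_func_arg(ctx, 0, &arg0) &&
 *	    !bpf_get_func_ret(ctx, &ret))
 *		bpf_printk("arg0=%llu ret=%llu", arg0, ret);
 */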

#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 * NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 * pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_p: data to verify
 * @sig_p: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
			       struct bpf_dynptr *sig_p,
			       struct bpf_key *trusted_keyring)
{
	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();
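
/*
 * Illustrative sketch (not part of this file): a sleepable LSM program
 * verifying a PKCS#7 signature against the secondary system keyring; the
 * dynptrs wrapping the data and signature buffers would be built with
 * bpf_dynptr_from_mem():
 *
 *	struct bpf_key *kr = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *
 *	if (!kr)
 *		return -ENOENT;
 *	ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *	bpf_key_put(kr);
 *
 * The bpf_key_put() is not optional: bpf_lookup_user_key() and
 * bpf_lookup_system_key() are KF_ACQUIRE kfuncs, so the verifier rejects
 * programs that leak the reference.
 */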

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	switch (func_id) {
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	default:
		break;
	}

	func_proto = bpf_base_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : &bpf_probe_write_user_proto;
	default:
		return NULL;
	}
}

static bool is_kprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_kprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}

static inline bool is_uprobe_multi(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static inline bool is_uprobe_session(const struct bpf_prog *prog)
{
	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return prog->sleepable ? &bpf_get_stack_sleepable_proto : &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		if (is_kprobe_multi(prog))
			return &bpf_get_func_ip_proto_kprobe_multi;
		if (is_uprobe_multi(prog))
			return &bpf_get_func_ip_proto_uprobe_multi;
		return &bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		if (is_kprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_kmulti;
		if (is_uprobe_multi(prog))
			return &bpf_get_attach_cookie_proto_umulti;
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
		return -ENOENT;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};
1716
1717static const struct bpf_func_proto *
1718pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1719{
1720 switch (func_id) {
1721 case BPF_FUNC_perf_event_output:
1722		return &bpf_perf_event_output_proto_tp;
1723	case BPF_FUNC_get_stackid:
1724		return &bpf_get_stackid_proto_pe;
1725	case BPF_FUNC_get_stack:
1726		return &bpf_get_stack_proto_pe;
1727	case BPF_FUNC_perf_prog_read_value:
1728		return &bpf_perf_prog_read_value_proto;
1729 case BPF_FUNC_read_branch_records:
1730 return &bpf_read_branch_records_proto;
1731 case BPF_FUNC_get_attach_cookie:
1732 return &bpf_get_attach_cookie_proto_pe;
1733	default:
1734		return bpf_tracing_func_proto(func_id, prog);
1735 }
1736}
1737
1738/*
1739 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1740 * to avoid potential recursive reuse issue when/if tracepoints are added
1741 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1742 *
1743 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1744 * in normal, irq, and nmi context.
1745 */
1746struct bpf_raw_tp_regs {
1747 struct pt_regs regs[3];
1748};
1749static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1750static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
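/*
 * Hand out one pt_regs slot per nesting level; the three slots are
 * enough for the normal/irq/nmi nesting described above, and anything
 * deeper gets -EBUSY.
 */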
1751static struct pt_regs *get_bpf_raw_tp_regs(void)
1752{
1753 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1754 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1755
1756	if (nest_level > ARRAY_SIZE(tp_regs->regs)) {
1757 this_cpu_dec(bpf_raw_tp_nest_level);
1758 return ERR_PTR(-EBUSY);
1759 }
1760
1761 return &tp_regs->regs[nest_level - 1];
1762}
1763
1764static void put_bpf_raw_tp_regs(void)
1765{
1766 this_cpu_dec(bpf_raw_tp_nest_level);
1767}
1768
1769BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1770 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1771{
1772 struct pt_regs *regs = get_bpf_raw_tp_regs();
1773 int ret;
1774
1775 if (IS_ERR(regs))
1776 return PTR_ERR(regs);
1777
1778 perf_fetch_caller_regs(regs);
1779 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1780
1781 put_bpf_raw_tp_regs();
1782 return ret;
1783}
1784
1785static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1786 .func = bpf_perf_event_output_raw_tp,
1787 .gpl_only = true,
1788 .ret_type = RET_INTEGER,
1789 .arg1_type = ARG_PTR_TO_CTX,
1790 .arg2_type = ARG_CONST_MAP_PTR,
1791 .arg3_type = ARG_ANYTHING,
1792	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1793 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1794};
1795
1796extern const struct bpf_func_proto bpf_skb_output_proto;
1797extern const struct bpf_func_proto bpf_xdp_output_proto;
1798extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1799
1800BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1801 struct bpf_map *, map, u64, flags)
1802{
1803 struct pt_regs *regs = get_bpf_raw_tp_regs();
1804 int ret;
1805
1806 if (IS_ERR(regs))
1807 return PTR_ERR(regs);
1808
1809 perf_fetch_caller_regs(regs);
1810 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1811 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1812 flags, 0, 0);
1813 put_bpf_raw_tp_regs();
1814 return ret;
1815}
1816
1817static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1818 .func = bpf_get_stackid_raw_tp,
1819 .gpl_only = true,
1820 .ret_type = RET_INTEGER,
1821 .arg1_type = ARG_PTR_TO_CTX,
1822 .arg2_type = ARG_CONST_MAP_PTR,
1823 .arg3_type = ARG_ANYTHING,
1824};
1825
1826BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1827 void *, buf, u32, size, u64, flags)
1828{
1829 struct pt_regs *regs = get_bpf_raw_tp_regs();
1830 int ret;
1831
1832 if (IS_ERR(regs))
1833 return PTR_ERR(regs);
1834
1835 perf_fetch_caller_regs(regs);
1836 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1837 (unsigned long) size, flags, 0);
1838 put_bpf_raw_tp_regs();
1839 return ret;
1840}
1841
1842static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1843 .func = bpf_get_stack_raw_tp,
1844 .gpl_only = true,
1845 .ret_type = RET_INTEGER,
1846 .arg1_type = ARG_PTR_TO_CTX,
1847	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1848 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1849 .arg4_type = ARG_ANYTHING,
1850};
1851
1852static const struct bpf_func_proto *
1853raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1854{
1855 switch (func_id) {
1856 case BPF_FUNC_perf_event_output:
1857 return &bpf_perf_event_output_proto_raw_tp;
1858 case BPF_FUNC_get_stackid:
1859 return &bpf_get_stackid_proto_raw_tp;
1860 case BPF_FUNC_get_stack:
1861 return &bpf_get_stack_proto_raw_tp;
1862 case BPF_FUNC_get_attach_cookie:
1863 return &bpf_get_attach_cookie_proto_tracing;
1864	default:
1865		return bpf_tracing_func_proto(func_id, prog);
1866 }
1867}
1868
1869const struct bpf_func_proto *
1870tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1871{
1872 const struct bpf_func_proto *fn;
1873
1874 switch (func_id) {
1875#ifdef CONFIG_NET
1876 case BPF_FUNC_skb_output:
1877 return &bpf_skb_output_proto;
1878 case BPF_FUNC_xdp_output:
1879 return &bpf_xdp_output_proto;
1880 case BPF_FUNC_skc_to_tcp6_sock:
1881 return &bpf_skc_to_tcp6_sock_proto;
1882 case BPF_FUNC_skc_to_tcp_sock:
1883 return &bpf_skc_to_tcp_sock_proto;
1884 case BPF_FUNC_skc_to_tcp_timewait_sock:
1885 return &bpf_skc_to_tcp_timewait_sock_proto;
1886 case BPF_FUNC_skc_to_tcp_request_sock:
1887 return &bpf_skc_to_tcp_request_sock_proto;
1888 case BPF_FUNC_skc_to_udp6_sock:
1889 return &bpf_skc_to_udp6_sock_proto;
1890 case BPF_FUNC_skc_to_unix_sock:
1891 return &bpf_skc_to_unix_sock_proto;
1892 case BPF_FUNC_skc_to_mptcp_sock:
1893 return &bpf_skc_to_mptcp_sock_proto;
1894 case BPF_FUNC_sk_storage_get:
1895 return &bpf_sk_storage_get_tracing_proto;
1896 case BPF_FUNC_sk_storage_delete:
1897 return &bpf_sk_storage_delete_tracing_proto;
1898 case BPF_FUNC_sock_from_file:
1899 return &bpf_sock_from_file_proto;
1900 case BPF_FUNC_get_socket_cookie:
1901 return &bpf_get_socket_ptr_cookie_proto;
1902 case BPF_FUNC_xdp_get_buff_len:
1903 return &bpf_xdp_get_buff_len_trace_proto;
1904#endif
1905 case BPF_FUNC_seq_printf:
1906 return prog->expected_attach_type == BPF_TRACE_ITER ?
1907 &bpf_seq_printf_proto :
1908 NULL;
1909 case BPF_FUNC_seq_write:
1910 return prog->expected_attach_type == BPF_TRACE_ITER ?
1911 &bpf_seq_write_proto :
1912 NULL;
1913 case BPF_FUNC_seq_printf_btf:
1914 return prog->expected_attach_type == BPF_TRACE_ITER ?
1915 &bpf_seq_printf_btf_proto :
1916 NULL;
1917 case BPF_FUNC_d_path:
1918 return &bpf_d_path_proto;
1919 case BPF_FUNC_get_func_arg:
1920 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1921 case BPF_FUNC_get_func_ret:
1922 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1923 case BPF_FUNC_get_func_arg_cnt:
1924 return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1925	case BPF_FUNC_get_attach_cookie:
1926 if (prog->type == BPF_PROG_TYPE_TRACING &&
1927 prog->expected_attach_type == BPF_TRACE_RAW_TP)
1928 return &bpf_get_attach_cookie_proto_tracing;
1929		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
1930	default:
1931 fn = raw_tp_prog_func_proto(func_id, prog);
1932 if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
1933 fn = bpf_iter_get_func_proto(func_id, prog);
1934 return fn;
1935 }
1936}
1937
1938static bool raw_tp_prog_is_valid_access(int off, int size,
1939 enum bpf_access_type type,
1940					const const struct bpf_prog *prog,
1941 struct bpf_insn_access_aux *info)
1942{
1943	return bpf_tracing_ctx_access(off, size, type);
1944}
1945
1946static bool tracing_prog_is_valid_access(int off, int size,
1947 enum bpf_access_type type,
1948 const struct bpf_prog *prog,
1949 struct bpf_insn_access_aux *info)
1950{
1951	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
1952}
1953
1954int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1955 const union bpf_attr *kattr,
1956 union bpf_attr __user *uattr)
1957{
1958 return -ENOTSUPP;
1959}
1960
1961const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1962 .get_func_proto = raw_tp_prog_func_proto,
1963 .is_valid_access = raw_tp_prog_is_valid_access,
1964};
1965
1966const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1967#ifdef CONFIG_NET
1968	.test_run = bpf_prog_test_run_raw_tp,
1969#endif
1970};
1971
1972const struct bpf_verifier_ops tracing_verifier_ops = {
1973 .get_func_proto = tracing_prog_func_proto,
1974 .is_valid_access = tracing_prog_is_valid_access,
1975};
1976
1977const struct bpf_prog_ops tracing_prog_ops = {
1978	.test_run = bpf_prog_test_run_tracing,
1979};
1980
1981static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1982 enum bpf_access_type type,
1983 const struct bpf_prog *prog,
1984 struct bpf_insn_access_aux *info)
1985{
1986 if (off == 0) {
1987 if (size != sizeof(u64) || type != BPF_READ)
1988 return false;
1989 info->reg_type = PTR_TO_TP_BUFFER;
1990 }
1991 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1992}
1993
1994const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1995 .get_func_proto = raw_tp_prog_func_proto,
1996 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1997};
1998
1999const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2000};
2001
2002static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2003				    const struct bpf_prog *prog,
2004				    struct bpf_insn_access_aux *info)
2005{
2006	const int size_u64 = sizeof(u64);
2007
2008 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2009 return false;
2010 if (type != BPF_READ)
2011 return false;
2012 if (off % size != 0) {
2013 if (sizeof(unsigned long) != 4)
2014 return false;
2015 if (size != 8)
2016 return false;
2017 if (off % size != 4)
2018 return false;
2019 }
2020
2021 switch (off) {
2022 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2023 bpf_ctx_record_field_size(info, size_u64);
2024 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2025 return false;
2026 break;
2027 case bpf_ctx_range(struct bpf_perf_event_data, addr):
2028 bpf_ctx_record_field_size(info, size_u64);
2029 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2030			return false;
2031 break;
2032 default:
2033 if (size != sizeof(long))
2034 return false;
2035 }
2036
2037 return true;
2038}
2039
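/*
 * Rewrite loads from the UAPI struct bpf_perf_event_data into two
 * loads: one fetching the kernel-side data/regs pointer from struct
 * bpf_perf_event_data_kern, and one reading the requested field
 * through it.
 */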
2040static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2041 const struct bpf_insn *si,
2042				      struct bpf_insn *insn_buf,
2043				      struct bpf_prog *prog, u32 *target_size)
2044{
2045 struct bpf_insn *insn = insn_buf;
2046
2047	switch (si->off) {
2048	case offsetof(struct bpf_perf_event_data, sample_period):
2049		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2050						       data), si->dst_reg, si->src_reg,
2051				      offsetof(struct bpf_perf_event_data_kern, data));
2052		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2053 bpf_target_off(struct perf_sample_data, period, 8,
2054 target_size));
2055		break;
2056 case offsetof(struct bpf_perf_event_data, addr):
2057 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2058 data), si->dst_reg, si->src_reg,
2059 offsetof(struct bpf_perf_event_data_kern, data));
2060 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2061 bpf_target_off(struct perf_sample_data, addr, 8,
2062 target_size));
2063 break;
2064	default:
2065		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2066						       regs), si->dst_reg, si->src_reg,
2067				      offsetof(struct bpf_perf_event_data_kern, regs));
2068 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2069 si->off);
2070 break;
2071 }
2072
2073 return insn - insn_buf;
2074}
2075
2076const struct bpf_verifier_ops perf_event_verifier_ops = {
2077	.get_func_proto		= pe_prog_func_proto,
2078 .is_valid_access = pe_prog_is_valid_access,
2079 .convert_ctx_access = pe_prog_convert_ctx_access,
2080};
2081
2082const struct bpf_prog_ops perf_event_prog_ops = {
2083};
2084
2085static DEFINE_MUTEX(bpf_event_mutex);
2086
2087#define BPF_TRACE_MAX_PROGS 64
2088
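/*
 * Attaching copies the event's prog_array (capped at
 * BPF_TRACE_MAX_PROGS entries) with the new program appended,
 * publishes the copy via RCU and frees the old array once readers
 * are done with it.
 */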
2089int perf_event_attach_bpf_prog(struct perf_event *event,
2090 struct bpf_prog *prog,
2091 u64 bpf_cookie)
2092{
2093	struct bpf_prog_array *old_array;
2094 struct bpf_prog_array *new_array;
2095 int ret = -EEXIST;
2096
2097	/*
2098	 * Kprobe override only works if the probe is at the function entry,
2099	 * and only if the function is on the error-injection opt-in list.
2100 */
2101 if (prog->kprobe_override &&
2102	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2103 !trace_kprobe_error_injectable(event->tp_event)))
2104 return -EINVAL;
2105
2106 mutex_lock(&bpf_event_mutex);
2107
2108 if (event->prog)
2109		goto unlock;
2110
2111	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2112 if (old_array &&
2113 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2114 ret = -E2BIG;
2115 goto unlock;
2116 }
2117
2118	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2119	if (ret < 0)
2120		goto unlock;
2121
2122 /* set the new array to event->tp_event and set event->prog */
2123 event->prog = prog;
2124	event->bpf_cookie = bpf_cookie;
2125	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2126	bpf_prog_array_free_sleepable(old_array);
2127
2128unlock:
2129 mutex_unlock(&bpf_event_mutex);
2130 return ret;
2131}
2132
2133void perf_event_detach_bpf_prog(struct perf_event *event)
2134{
2135	struct bpf_prog_array *old_array;
2136	struct bpf_prog_array *new_array;
2137	struct bpf_prog *prog = NULL;
2138 int ret;
2139
2140 mutex_lock(&bpf_event_mutex);
2141
2142 if (!event->prog)
2143		goto unlock;
2144
2145	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2146 if (!old_array)
2147 goto put;
2148
2149	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2150 if (ret < 0) {
2151 bpf_prog_array_delete_safe(old_array, event->prog);
2152 } else {
2153 rcu_assign_pointer(event->tp_event->prog_array, new_array);
2154		bpf_prog_array_free_sleepable(old_array);
2155 }
2156
2157put:
2158	prog = event->prog;
2159 event->prog = NULL;
2160
2161unlock:
2162	mutex_unlock(&bpf_event_mutex);
2163
2164 if (prog) {
2165 /*
2166 * It could be that the bpf_prog is not sleepable (and will be freed
2167 * via normal RCU), but is called from a point that supports sleepable
2168 * programs and uses tasks-trace-RCU.
2169 */
2170 synchronize_rcu_tasks_trace();
2171
2172 bpf_prog_put(prog);
2173 }
2174}
2175
2176int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2177{
2178 struct perf_event_query_bpf __user *uquery = info;
2179 struct perf_event_query_bpf query = {};
2180	struct bpf_prog_array *progs;
2181	u32 *ids, prog_cnt, ids_len;
2182 int ret;
2183
2184	if (!perfmon_capable())
2185 return -EPERM;
2186 if (event->attr.type != PERF_TYPE_TRACEPOINT)
2187 return -EINVAL;
2188 if (copy_from_user(&query, uquery, sizeof(query)))
2189 return -EFAULT;
2190
2191 ids_len = query.ids_len;
2192 if (ids_len > BPF_TRACE_MAX_PROGS)
2193		return -E2BIG;
2194 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2195 if (!ids)
2196 return -ENOMEM;
2197 /*
2198 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2199	 * is required when the user only wants to check uquery->prog_cnt.
2200 * There is no need to check for it since the case is handled
2201 * gracefully in bpf_prog_array_copy_info.
2202 */
2203
2204 mutex_lock(&bpf_event_mutex);
2205 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2206 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2207 mutex_unlock(&bpf_event_mutex);
2208
2209 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2210 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2211 ret = -EFAULT;
2212
2213 kfree(ids);
2214 return ret;
2215}
2216
2217extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2218extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2219
2220struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2221{
2222 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2223
2224 for (; btp < __stop__bpf_raw_tp; btp++) {
2225 if (!strcmp(btp->tp->name, name))
2226 return btp;
2227 }
2228
2229 return bpf_get_raw_tracepoint_module(name);
2230}
2231
2232void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2233{
2234	struct module *mod;
2235
2236	guard(rcu)();
2237 mod = __module_address((unsigned long)btp);
2238 module_put(mod);
2239}
2240
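/*
 * Common entry point for the bpf_trace_run* helpers below: guard
 * against recursion via prog->active (counting misses), make the
 * link's cookie available through the run context, then run the
 * program on the args array.
 */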
2241static __always_inline
2242void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
2243{
2244	struct bpf_prog *prog = link->link.prog;
2245 struct bpf_run_ctx *old_run_ctx;
2246 struct bpf_trace_run_ctx run_ctx;
2247
2248	cant_sleep();
2249 if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2250 bpf_prog_inc_misses_counter(prog);
2251 goto out;
2252 }
2253
2254 run_ctx.bpf_cookie = link->cookie;
2255 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2256
2257	rcu_read_lock();
2258	(void) bpf_prog_run(prog, args);
2259	rcu_read_unlock();
2260
2261 bpf_reset_run_ctx(old_run_ctx);
2262out:
2263 this_cpu_dec(*(prog->active));
2264}
2265
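/*
 * The REPEAT/UNPACK machinery below expands BPF_TRACE_DEFN_x(N) into
 * a bpf_trace_runN() function taking N u64 arguments, which are
 * copied into a stack array and handed to __bpf_trace_run().
 */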
2266#define UNPACK(...) __VA_ARGS__
2267#define REPEAT_1(FN, DL, X, ...) FN(X)
2268#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2269#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2270#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2271#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2272#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2273#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2274#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2275#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2276#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2277#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2278#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2279#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2280
2281#define SARG(X) u64 arg##X
2282#define COPY(X) args[X] = arg##X
2283
2284#define __DL_COM (,)
2285#define __DL_SEM (;)
2286
2287#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2288
2289#define BPF_TRACE_DEFN_x(x) \
2290	void bpf_trace_run##x(struct bpf_raw_tp_link *link,		\
2291 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2292 { \
2293 u64 args[x]; \
2294 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2295		__bpf_trace_run(link, args);				\
2296 } \
2297 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2298BPF_TRACE_DEFN_x(1);
2299BPF_TRACE_DEFN_x(2);
2300BPF_TRACE_DEFN_x(3);
2301BPF_TRACE_DEFN_x(4);
2302BPF_TRACE_DEFN_x(5);
2303BPF_TRACE_DEFN_x(6);
2304BPF_TRACE_DEFN_x(7);
2305BPF_TRACE_DEFN_x(8);
2306BPF_TRACE_DEFN_x(9);
2307BPF_TRACE_DEFN_x(10);
2308BPF_TRACE_DEFN_x(11);
2309BPF_TRACE_DEFN_x(12);
2310
2311int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2312{
2313 struct tracepoint *tp = btp->tp;
2314	struct bpf_prog *prog = link->link.prog;
2315
2316 /*
2317 * check that program doesn't access arguments beyond what's
2318 * available in this tracepoint
2319 */
2320 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2321 return -EINVAL;
2322
2323 if (prog->aux->max_tp_access > btp->writable_size)
2324 return -EINVAL;
2325
2326	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link);
2327}
2328
2329int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
2330{
2331	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link);
2332}
2333
2334int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2335 u32 *fd_type, const char **buf,
2336 u64 *probe_offset, u64 *probe_addr,
2337 unsigned long *missed)
2338{
2339 bool is_tracepoint, is_syscall_tp;
2340 struct bpf_prog *prog;
2341 int flags, err = 0;
2342
2343 prog = event->prog;
2344 if (!prog)
2345 return -ENOENT;
2346
2347 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2348 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2349 return -EOPNOTSUPP;
2350
2351 *prog_id = prog->aux->id;
2352 flags = event->tp_event->flags;
2353 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2354 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2355
2356 if (is_tracepoint || is_syscall_tp) {
2357 *buf = is_tracepoint ? event->tp_event->tp->name
2358 : event->tp_event->name;
2359 /* We allow NULL pointer for tracepoint */
2360 if (fd_type)
2361 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2362 if (probe_offset)
2363 *probe_offset = 0x0;
2364 if (probe_addr)
2365 *probe_addr = 0x0;
2366 } else {
2367 /* kprobe/uprobe */
2368 err = -EOPNOTSUPP;
2369#ifdef CONFIG_KPROBE_EVENTS
2370 if (flags & TRACE_EVENT_FL_KPROBE)
2371 err = bpf_get_kprobe_info(event, fd_type, buf,
2372					  probe_offset, probe_addr, missed,
2373 event->attr.type == PERF_TYPE_TRACEPOINT);
2374#endif
2375#ifdef CONFIG_UPROBE_EVENTS
2376 if (flags & TRACE_EVENT_FL_UPROBE)
2377 err = bpf_get_uprobe_info(event, fd_type, buf,
2378					  probe_offset, probe_addr,
2379 event->attr.type == PERF_TYPE_TRACEPOINT);
2380#endif
2381 }
2382
2383 return err;
2384}
2385
2386static int __init send_signal_irq_work_init(void)
2387{
2388 int cpu;
2389 struct send_signal_irq_work *work;
2390
2391 for_each_possible_cpu(cpu) {
2392 work = per_cpu_ptr(&send_signal_work, cpu);
2393 init_irq_work(&work->irq_work, do_bpf_send_signal);
2394 }
2395 return 0;
2396}
2397
2398subsys_initcall(send_signal_irq_work_init);
2399
2400#ifdef CONFIG_MODULES
2401static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2402 void *module)
2403{
2404 struct bpf_trace_module *btm, *tmp;
2405 struct module *mod = module;
2406	int ret = 0;
2407
2408 if (mod->num_bpf_raw_events == 0 ||
2409 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2410		goto out;
2411
2412 mutex_lock(&bpf_module_mutex);
2413
2414 switch (op) {
2415 case MODULE_STATE_COMING:
2416 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2417 if (btm) {
2418 btm->module = module;
2419 list_add(&btm->list, &bpf_trace_modules);
2420 } else {
2421 ret = -ENOMEM;
2422 }
2423 break;
2424 case MODULE_STATE_GOING:
2425 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2426 if (btm->module == module) {
2427 list_del(&btm->list);
2428 kfree(btm);
2429 break;
2430 }
2431 }
2432 break;
2433 }
2434
2435 mutex_unlock(&bpf_module_mutex);
2436
2437out:
2438 return notifier_from_errno(ret);
2439}
2440
2441static struct notifier_block bpf_module_nb = {
2442 .notifier_call = bpf_event_notify,
2443};
2444
2445static int __init bpf_event_init(void)
2446{
2447 register_module_notifier(&bpf_module_nb);
2448 return 0;
2449}
2450
2451fs_initcall(bpf_event_init);
2452#endif /* CONFIG_MODULES */
2453
2454struct bpf_session_run_ctx {
2455 struct bpf_run_ctx run_ctx;
2456 bool is_return;
2457	void *data;
2458};
2459
2460#ifdef CONFIG_FPROBE
2461struct bpf_kprobe_multi_link {
2462 struct bpf_link link;
2463 struct fprobe fp;
2464 unsigned long *addrs;
2465 u64 *cookies;
2466 u32 cnt;
2467 u32 mods_cnt;
2468 struct module **mods;
2469	u32 flags;
2470};
2471
2472struct bpf_kprobe_multi_run_ctx {
2473	struct bpf_session_run_ctx session_ctx;
2474 struct bpf_kprobe_multi_link *link;
2475 unsigned long entry_ip;
2476};
2477
0236fec5
JO
2478struct user_syms {
2479 const char **syms;
2480 char *buf;
2481};
2482
8e2759da
MHG
2483#ifndef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
2484static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);
2485#define bpf_kprobe_multi_pt_regs_ptr() this_cpu_ptr(&bpf_kprobe_multi_pt_regs)
2486#else
2487#define bpf_kprobe_multi_pt_regs_ptr() (NULL)
2488#endif
2489
2490static unsigned long ftrace_get_entry_ip(unsigned long fentry_ip)
2491{
2492 unsigned long ip = ftrace_get_symaddr(fentry_ip);
2493
2494 return ip ? : fentry_ip;
2495}
2496
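/*
 * Copy a user-supplied array of symbol name pointers into kernel
 * memory: us->buf holds the NUL-terminated strings back to back,
 * us->syms points at each string within it.
 */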
2497static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2498{
2499 unsigned long __user usymbol;
2500 const char **syms = NULL;
2501 char *buf = NULL, *p;
2502 int err = -ENOMEM;
2503 unsigned int i;
2504
2505	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2506 if (!syms)
2507 goto error;
2508
2509	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2510 if (!buf)
2511 goto error;
2512
2513 for (p = buf, i = 0; i < cnt; i++) {
2514 if (__get_user(usymbol, usyms + i)) {
2515 err = -EFAULT;
2516 goto error;
2517 }
2518 err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2519 if (err == KSYM_NAME_LEN)
2520 err = -E2BIG;
2521 if (err < 0)
2522 goto error;
2523 syms[i] = p;
2524 p += err + 1;
2525 }
2526
2527 us->syms = syms;
2528 us->buf = buf;
2529 return 0;
2530
2531error:
2532 if (err) {
2533 kvfree(syms);
2534 kvfree(buf);
2535 }
2536 return err;
2537}
2538
2539static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2540{
2541 u32 i;
2542
2543 for (i = 0; i < cnt; i++)
2544 module_put(mods[i]);
2545}
2546
2547static void free_user_syms(struct user_syms *us)
2548{
2549 kvfree(us->syms);
2550 kvfree(us->buf);
2551}
2552
2553static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2554{
2555 struct bpf_kprobe_multi_link *kmulti_link;
2556
2557 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2558 unregister_fprobe(&kmulti_link->fp);
2559	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2560}
2561
2562static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2563{
2564 struct bpf_kprobe_multi_link *kmulti_link;
2565
2566 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2567 kvfree(kmulti_link->addrs);
2568	kvfree(kmulti_link->cookies);
2569	kfree(kmulti_link->mods);
2570 kfree(kmulti_link);
2571}
2572
2573static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2574 struct bpf_link_info *info)
2575{
2576	u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
2577 u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2578 struct bpf_kprobe_multi_link *kmulti_link;
2579 u32 ucount = info->kprobe_multi.count;
2580 int err = 0, i;
2581
2582 if (!uaddrs ^ !ucount)
2583 return -EINVAL;
2584 if (ucookies && !ucount)
2585 return -EINVAL;
2586
2587 kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2588 info->kprobe_multi.count = kmulti_link->cnt;
2589 info->kprobe_multi.flags = kmulti_link->flags;
2590	info->kprobe_multi.missed = kmulti_link->fp.nmissed;
2591
2592 if (!uaddrs)
2593 return 0;
2594 if (ucount < kmulti_link->cnt)
2595 err = -ENOSPC;
2596 else
2597 ucount = kmulti_link->cnt;
2598
2599 if (ucookies) {
2600 if (kmulti_link->cookies) {
2601 if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2602 return -EFAULT;
2603 } else {
2604 for (i = 0; i < ucount; i++) {
2605 if (put_user(0, ucookies + i))
2606 return -EFAULT;
2607 }
2608 }
2609 }
2610
2611 if (kallsyms_show_value(current_cred())) {
2612 if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2613 return -EFAULT;
2614 } else {
2615 for (i = 0; i < ucount; i++) {
2616 if (put_user(0, uaddrs + i))
2617 return -EFAULT;
2618 }
2619 }
2620 return err;
2621}
2622
2623static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2624 .release = bpf_kprobe_multi_link_release,
2625	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2626	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2627};
2628
2629static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2630{
2631 const struct bpf_kprobe_multi_link *link = priv;
2632 unsigned long *addr_a = a, *addr_b = b;
2633 u64 *cookie_a, *cookie_b;
2634
2635 cookie_a = link->cookies + (addr_a - link->addrs);
2636 cookie_b = link->cookies + (addr_b - link->addrs);
2637
2638 /* swap addr_a/addr_b and cookie_a/cookie_b values */
2639 swap(*addr_a, *addr_b);
2640 swap(*cookie_a, *cookie_b);
2641}
2642
2643static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2644{
2645 const unsigned long *addr_a = a, *addr_b = b;
2646
2647 if (*addr_a == *addr_b)
2648 return 0;
2649 return *addr_a < *addr_b ? -1 : 1;
2650}
2651
2652static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2653{
2654	return bpf_kprobe_multi_addrs_cmp(a, b);
2655}
2656
2657static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2658{
2659	struct bpf_kprobe_multi_run_ctx *run_ctx;
2660	struct bpf_kprobe_multi_link *link;
2661	u64 *cookie, entry_ip;
2662	unsigned long *addr;
2663
2664 if (WARN_ON_ONCE(!ctx))
2665 return 0;
2666 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2667 session_ctx.run_ctx);
2668	link = run_ctx->link;
2669 if (!link->cookies)
2670 return 0;
2671 entry_ip = run_ctx->entry_ip;
2672 addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2673		       bpf_kprobe_multi_addrs_cmp);
2674 if (!addr)
2675 return 0;
2676 cookie = link->cookies + (addr - link->addrs);
2677 return *cookie;
2678}
2679
2680static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2681{
2682 struct bpf_kprobe_multi_run_ctx *run_ctx;
2683
2684 run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
2685 session_ctx.run_ctx);
2686 return run_ctx->entry_ip;
2687}
2688
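/*
 * Run the link's program for one probe hit. bpf_prog_active guards
 * against recursion (misses are counted), and a pt_regs view is
 * derived from the ftrace_regs when the architecture does not
 * already provide one.
 */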
2689static int
2690kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2691			   unsigned long entry_ip, struct ftrace_regs *fregs,
2692			   bool is_return, void *data)
2693{
2694	struct bpf_kprobe_multi_run_ctx run_ctx = {
2695 .session_ctx = {
2696 .is_return = is_return,
2697			.data = data,
2698		},
2699 .link = link,
2700 .entry_ip = entry_ip,
2701 };
2702	struct bpf_run_ctx *old_run_ctx;
2703	struct pt_regs *regs;
2704 int err;
2705
2706 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2707		bpf_prog_inc_misses_counter(link->link.prog);
2708		err = 1;
2709 goto out;
2710 }
2711
2712 migrate_disable();
2713 rcu_read_lock();
2714	regs = ftrace_partial_regs(fregs, bpf_kprobe_multi_pt_regs_ptr());
2715	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
2716	err = bpf_prog_run(link->link.prog, regs);
2717	bpf_reset_run_ctx(old_run_ctx);
2718 rcu_read_unlock();
2719 migrate_enable();
2720
2721 out:
2722 __this_cpu_dec(bpf_prog_active);
2723 return err;
2724}
2725
2726static int
2727kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2728			  unsigned long ret_ip, struct ftrace_regs *fregs,
2729			  void *data)
2730{
2731	struct bpf_kprobe_multi_link *link;
2732	int err;
2733
2734	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2735 err = kprobe_multi_link_prog_run(link, ftrace_get_entry_ip(fentry_ip),
2736 fregs, false, data);
2737	return is_kprobe_session(link->link.prog) ? err : 0;
2738}
2739
2740static void
2741kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2742			       unsigned long ret_ip, struct ftrace_regs *fregs,
2743			       void *data)
2744{
2745 struct bpf_kprobe_multi_link *link;
2746
2747	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2748 kprobe_multi_link_prog_run(link, ftrace_get_entry_ip(fentry_ip),
2749 fregs, true, data);
2750}
2751
2752static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2753{
2754 const char **str_a = (const char **) a;
2755 const char **str_b = (const char **) b;
2756
2757	return strcmp(*str_a, *str_b);
2758}
2759
2760struct multi_symbols_sort {
2761 const char **funcs;
2762 u64 *cookies;
2763};
2764
2765static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2766{
2767 const struct multi_symbols_sort *data = priv;
2768 const char **name_a = a, **name_b = b;
2769
2770 swap(*name_a, *name_b);
2771
2772 /* If defined, swap also related cookies. */
2773 if (data->cookies) {
2774 u64 *cookie_a, *cookie_b;
2775
2776 cookie_a = data->cookies + (name_a - data->funcs);
2777 cookie_b = data->cookies + (name_b - data->funcs);
2778 swap(*cookie_a, *cookie_b);
2779 }
2780}
2781
2782struct modules_array {
2783 struct module **mods;
2784 int mods_cnt;
2785 int mods_cap;
2786};
2787
2788static int add_module(struct modules_array *arr, struct module *mod)
2789{
2790 struct module **mods;
2791
2792 if (arr->mods_cnt == arr->mods_cap) {
2793 arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2794 mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2795 if (!mods)
2796 return -ENOMEM;
2797		arr->mods = mods;
2798 }
2799
2800 arr->mods[arr->mods_cnt] = mod;
2801 arr->mods_cnt++;
2802 return 0;
2803}
2804
2805static bool has_module(struct modules_array *arr, struct module *mod)
2806{
2807 int i;
2808
2809 for (i = arr->mods_cnt - 1; i >= 0; i--) {
2810 if (arr->mods[i] == mod)
2811 return true;
2812 }
2813 return false;
2814}
2815
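/*
 * Take a reference on every module that contains one of the probed
 * addresses, so the module cannot be unloaded while the fprobe link
 * is attached; the references are dropped again on link release.
 */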
2816static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2817{
2818 struct modules_array arr = {};
2819 u32 i, err = 0;
2820
2821 for (i = 0; i < addrs_cnt; i++) {
2822		bool skip_add = false;
2823 struct module *mod;
2824
2825 scoped_guard(rcu) {
2826 mod = __module_address(addrs[i]);
2827 /* Either no module or it's already stored */
2828 if (!mod || has_module(&arr, mod)) {
2829 skip_add = true;
2830 break; /* scoped_guard */
2831 }
2832 if (!try_module_get(mod))
2833 err = -EINVAL;
2834		}
2835 if (skip_add)
2836 continue;
2837 if (err)
2838 break;
2839 err = add_module(&arr, mod);
2840 if (err) {
2841 module_put(mod);
2842 break;
2843 }
2844 }
2845
2846 /* We return either err < 0 in case of error, ... */
2847	if (err) {
2848 kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2849 kfree(arr.mods);
e22061b2
JO
2850 return err;
2851 }
2852
2853 /* or number of modules found if everything is ok. */
2854 *mods = arr.mods;
2855 return arr.mods_cnt;
2856}
2857
2858static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2859{
2860 u32 i;
2861
2862 for (i = 0; i < cnt; i++) {
2863 if (!within_error_injection_list(addrs[i]))
2864 return -EINVAL;
2865 }
2866 return 0;
2867}
2868
0dcac272
JO
2869int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2870{
2871 struct bpf_kprobe_multi_link *link = NULL;
2872 struct bpf_link_primer link_primer;
2873	void __user *ucookies;
2874 unsigned long *addrs;
2875 u32 flags, cnt, size;
2876 void __user *uaddrs;
2877	u64 *cookies = NULL;
2878 void __user *usyms;
2879 int err;
2880
2881 /* no support for 32bit archs yet */
2882 if (sizeof(u64) != sizeof(void *))
2883 return -EOPNOTSUPP;
2884
2885 if (attr->link_create.flags)
2886 return -EINVAL;
2887
2888	if (!is_kprobe_multi(prog))
2889 return -EINVAL;
2890
2891 flags = attr->link_create.kprobe_multi.flags;
2892 if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2893 return -EINVAL;
2894
2895 uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2896 usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2897 if (!!uaddrs == !!usyms)
2898 return -EINVAL;
2899
2900 cnt = attr->link_create.kprobe_multi.cnt;
2901 if (!cnt)
2902 return -EINVAL;
2903 if (cnt > MAX_KPROBE_MULTI_CNT)
2904 return -E2BIG;
2905
2906 size = cnt * sizeof(*addrs);
2907	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2908 if (!addrs)
2909 return -ENOMEM;
2910
2911 ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2912 if (ucookies) {
2913 cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2914 if (!cookies) {
2915 err = -ENOMEM;
2916 goto error;
2917 }
2918 if (copy_from_user(cookies, ucookies, size)) {
2919 err = -EFAULT;
2920 goto error;
2921 }
2922 }
2923
2924 if (uaddrs) {
2925 if (copy_from_user(addrs, uaddrs, size)) {
2926 err = -EFAULT;
2927 goto error;
2928 }
2929 } else {
2930 struct multi_symbols_sort data = {
2931 .cookies = cookies,
2932 };
2933 struct user_syms us;
2934
2935 err = copy_user_syms(&us, usyms, cnt);
2936 if (err)
2937 goto error;
2938
2939 if (cookies)
2940 data.funcs = us.syms;
2941
2942 sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2943 symbols_swap_r, &data);
2944
2945 err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2946 free_user_syms(&us);
2947 if (err)
2948 goto error;
2949 }
2950
2951 if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2952 err = -EINVAL;
2953 goto error;
2954 }
2955
2956 link = kzalloc(sizeof(*link), GFP_KERNEL);
2957 if (!link) {
2958 err = -ENOMEM;
2959 goto error;
2960 }
2961
2962 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2963 &bpf_kprobe_multi_link_lops, prog);
2964
2965 err = bpf_link_prime(&link->link, &link_primer);
2966 if (err)
2967 goto error;
2968
2969	if (!(flags & BPF_F_KPROBE_MULTI_RETURN))
2970		link->fp.entry_handler = kprobe_multi_link_handler;
2971 if ((flags & BPF_F_KPROBE_MULTI_RETURN) || is_kprobe_session(prog))
2972 link->fp.exit_handler = kprobe_multi_link_exit_handler;
2973 if (is_kprobe_session(prog))
2974 link->fp.entry_data_size = sizeof(u64);
2975
2976 link->addrs = addrs;
2977 link->cookies = cookies;
2978 link->cnt = cnt;
2979	link->flags = flags;
2980
2981 if (cookies) {
2982 /*
2983 * Sorting addresses will trigger sorting cookies as well
2984 * (check bpf_kprobe_multi_cookie_swap). This way we can
2985 * find cookie based on the address in bpf_get_attach_cookie
2986 * helper.
2987 */
2988 sort_r(addrs, cnt, sizeof(*addrs),
2989 bpf_kprobe_multi_cookie_cmp,
2990 bpf_kprobe_multi_cookie_swap,
2991 link);
2992 }
2993
2994 err = get_modules_for_addrs(&link->mods, addrs, cnt);
2995 if (err < 0) {
2996 bpf_link_cleanup(&link_primer);
2997 return err;
2998	}
2999	link->mods_cnt = err;
3000
3001 err = register_fprobe_ips(&link->fp, addrs, cnt);
3002 if (err) {
3003		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3004 bpf_link_cleanup(&link_primer);
3005 return err;
3006 }
3007
3008 return bpf_link_settle(&link_primer);
3009
3010error:
3011 kfree(link);
3012 kvfree(addrs);
3013	kvfree(cookies);
3014 return err;
3015}
3016#else /* !CONFIG_FPROBE */
3017int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3018{
3019 return -EOPNOTSUPP;
3020}
3021static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3022{
3023 return 0;
3024}
3025static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3026{
3027 return 0;
3028}
3029#endif
3030
3031#ifdef CONFIG_UPROBES
3032struct bpf_uprobe_multi_link;
3033
3034struct bpf_uprobe {
3035 struct bpf_uprobe_multi_link *link;
3036 loff_t offset;
3037	unsigned long ref_ctr_offset;
3038	u64 cookie;
3039	struct uprobe *uprobe;
3040	struct uprobe_consumer consumer;
3041	bool session;
3042};
3043
3044struct bpf_uprobe_multi_link {
3045 struct path path;
3046 struct bpf_link link;
3047 u32 cnt;
3048	u32 flags;
3049	struct bpf_uprobe *uprobes;
3050	struct task_struct *task;
3051};
3052
3053struct bpf_uprobe_multi_run_ctx {
3054	struct bpf_session_run_ctx session_ctx;
3055	unsigned long entry_ip;
3056	struct bpf_uprobe *uprobe;
3057};
3058
3059static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
3060{
3061 u32 i;
3062
3063	for (i = 0; i < cnt; i++)
3064 uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
3065
3066 if (cnt)
3067 uprobe_unregister_sync();
3068}
3069
3070static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3071{
3072 struct bpf_uprobe_multi_link *umulti_link;
3073
3074 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3075	bpf_uprobe_unregister(umulti_link->uprobes, umulti_link->cnt);
3076 if (umulti_link->task)
3077 put_task_struct(umulti_link->task);
3078 path_put(&umulti_link->path);
3079}
3080
3081static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3082{
3083 struct bpf_uprobe_multi_link *umulti_link;
3084
3085 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3086 kvfree(umulti_link->uprobes);
3087 kfree(umulti_link);
3088}
3089
3090static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
3091 struct bpf_link_info *info)
3092{
3093 u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
3094 u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
3095 u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
3096 u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
3097 u32 upath_size = info->uprobe_multi.path_size;
3098 struct bpf_uprobe_multi_link *umulti_link;
3099 u32 ucount = info->uprobe_multi.count;
3100 int err = 0, i;
3101 char *p, *buf;
3102 long left = 0;
e56fdbfb
JO
3103
3104 if (!upath ^ !upath_size)
3105 return -EINVAL;
3106
3107 if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
3108 return -EINVAL;
3109
3110 umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3111 info->uprobe_multi.count = umulti_link->cnt;
3112 info->uprobe_multi.flags = umulti_link->flags;
3113 info->uprobe_multi.pid = umulti_link->task ?
3114 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3115
3116 upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
3117 buf = kmalloc(upath_size, GFP_KERNEL);
3118 if (!buf)
3119 return -ENOMEM;
3120 p = d_path(&umulti_link->path, buf, upath_size);
3121 if (IS_ERR(p)) {
3122		kfree(buf);
3123		return PTR_ERR(p);
3124	}
3125 upath_size = buf + upath_size - p;
3126
3127 if (upath)
3128 left = copy_to_user(upath, p, upath_size);
3129 kfree(buf);
3130 if (left)
3131 return -EFAULT;
3132 info->uprobe_multi.path_size = upath_size;
3133
3134 if (!uoffsets && !ucookies && !uref_ctr_offsets)
3135 return 0;
3136
3137 if (ucount < umulti_link->cnt)
3138 err = -ENOSPC;
3139 else
3140 ucount = umulti_link->cnt;
3141
3142 for (i = 0; i < ucount; i++) {
3143 if (uoffsets &&
3144 put_user(umulti_link->uprobes[i].offset, uoffsets + i))
3145 return -EFAULT;
3146 if (uref_ctr_offsets &&
3147 put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
3148 return -EFAULT;
3149 if (ucookies &&
3150 put_user(umulti_link->uprobes[i].cookie, ucookies + i))
3151 return -EFAULT;
3152 }
3153
3154 return err;
3155}
3156
3157static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3158 .release = bpf_uprobe_multi_link_release,
3159	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3160	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
3161};
3162
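/*
 * Run the uprobe's program for one hit. Sleepable programs are
 * protected by tasks-trace RCU, everything else by plain RCU, and
 * hits in tasks outside the target thread group are filtered out
 * early.
 */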
3163static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3164 unsigned long entry_ip,
3165 struct pt_regs *regs,
3166 bool is_return, void *data)
3167{
3168 struct bpf_uprobe_multi_link *link = uprobe->link;
3169 struct bpf_uprobe_multi_run_ctx run_ctx = {
3170 .session_ctx = {
3171 .is_return = is_return,
3172 .data = data,
3173 },
3174		.entry_ip = entry_ip,
3175		.uprobe = uprobe,
3176 };
3177 struct bpf_prog *prog = link->link.prog;
3178	bool sleepable = prog->sleepable;
3179	struct bpf_run_ctx *old_run_ctx;
3180	int err;
3181
3182	if (link->task && !same_thread_group(current, link->task))
3183 return 0;
3184
89ae89f5
JO
3185 if (sleepable)
3186 rcu_read_lock_trace();
3187 else
3188 rcu_read_lock();
3189
3190 migrate_disable();
3191
3192 old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
3193 err = bpf_prog_run(link->link.prog, regs);
3194 bpf_reset_run_ctx(old_run_ctx);
3195
3196 migrate_enable();
3197
3198 if (sleepable)
3199 rcu_read_unlock_trace();
3200 else
3201 rcu_read_unlock();
3202	return err;
3203}
3204
3205static bool
3206uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm)
3207{
3208 struct bpf_uprobe *uprobe;
3209
3210 uprobe = container_of(con, struct bpf_uprobe, consumer);
3211 return uprobe->link->task->mm == mm;
3212}
3213
3214static int
3215uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
3216 __u64 *data)
89ae89f5
JO
3217{
3218 struct bpf_uprobe *uprobe;
3219	int ret;
3220
3221 uprobe = container_of(con, struct bpf_uprobe, consumer);
3222	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
3223 if (uprobe->session)
3224 return ret ? UPROBE_HANDLER_IGNORE : 0;
3225 return 0;
3226}
3227
3228static int
3229uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs,
3230 __u64 *data)
3231{
3232 struct bpf_uprobe *uprobe;
3233
3234 uprobe = container_of(con, struct bpf_uprobe, consumer);
3235	uprobe_prog_run(uprobe, func, regs, true, data);
3236	return 0;
3237}
3238
3239static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3240{
3241 struct bpf_uprobe_multi_run_ctx *run_ctx;
3242
3243 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3244 session_ctx.run_ctx);
3245 return run_ctx->entry_ip;
3246}
3247
3248static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3249{
3250 struct bpf_uprobe_multi_run_ctx *run_ctx;
3251
3252 run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
3253 session_ctx.run_ctx);
3254 return run_ctx->uprobe->cookie;
3255}
3256
3257int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3258{
3259 struct bpf_uprobe_multi_link *link = NULL;
3260 unsigned long __user *uref_ctr_offsets;
3261 struct bpf_link_primer link_primer;
3262 struct bpf_uprobe *uprobes = NULL;
b733eead 3263 struct task_struct *task = NULL;
89ae89f5 3264 unsigned long __user *uoffsets;
0b779b61 3265 u64 __user *ucookies;
89ae89f5
JO
3266 void __user *upath;
3267 u32 flags, cnt, i;
3268 struct path path;
3269 char *name;
3270	pid_t pid;
3271 int err;
3272
3273 /* no support for 32bit archs yet */
3274 if (sizeof(u64) != sizeof(void *))
3275 return -EOPNOTSUPP;
3276
3277 if (attr->link_create.flags)
3278 return -EINVAL;
3279
3280	if (!is_uprobe_multi(prog))
3281 return -EINVAL;
3282
3283 flags = attr->link_create.uprobe_multi.flags;
3284 if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3285 return -EINVAL;
3286
3287 /*
3288 * path, offsets and cnt are mandatory,
3289	 * ref_ctr_offsets and cookies are optional
3290 */
3291 upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3292 uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3293 cnt = attr->link_create.uprobe_multi.cnt;
3294	pid = attr->link_create.uprobe_multi.pid;
3295
3296	if (!upath || !uoffsets || !cnt || pid < 0)
3297		return -EINVAL;
3298 if (cnt > MAX_UPROBE_MULTI_CNT)
3299 return -E2BIG;
3300
3301 uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3302	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3303
3304 name = strndup_user(upath, PATH_MAX);
3305 if (IS_ERR(name)) {
3306 err = PTR_ERR(name);
3307 return err;
3308 }
3309
3310 err = kern_path(name, LOOKUP_FOLLOW, &path);
3311 kfree(name);
3312 if (err)
3313 return err;
3314
3315 if (!d_is_reg(path.dentry)) {
3316 err = -EBADF;
3317 goto error_path_put;
3318 }
3319
3320	if (pid) {
3321		rcu_read_lock();
3322		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3323		rcu_read_unlock();
3324 if (!task) {
3325 err = -ESRCH;
3326			goto error_path_put;
3327		}
3328 }
3329
3330 err = -ENOMEM;
3331
3332 link = kzalloc(sizeof(*link), GFP_KERNEL);
3333 uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3334
3335 if (!uprobes || !link)
3336 goto error_free;
3337
3338	for (i = 0; i < cnt; i++) {
3339		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3340 err = -EFAULT;
3341 goto error_free;
3342 }
3343 if (uprobes[i].offset < 0) {
3344 err = -EINVAL;
3345 goto error_free;
3346 }
3347		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3348 err = -EFAULT;
3349 goto error_free;
3350 }
3351		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3352 err = -EFAULT;
3353 goto error_free;
3354 }
3355
3356 uprobes[i].link = link;
3357
3358		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
3359			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3360 if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
3361 uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3362 if (is_uprobe_session(prog))
3363 uprobes[i].session = true;
3364 if (pid)
3365 uprobes[i].consumer.filter = uprobe_multi_link_filter;
3366 }
3367
3368 link->cnt = cnt;
3369 link->uprobes = uprobes;
3370 link->path = path;
3371	link->task = task;
3372	link->flags = flags;
3373
3374 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3375 &bpf_uprobe_multi_link_lops, prog);
3376
3377 for (i = 0; i < cnt; i++) {
3378 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
3379 uprobes[i].offset,
3380 uprobes[i].ref_ctr_offset,
3381 &uprobes[i].consumer);
3382 if (IS_ERR(uprobes[i].uprobe)) {
3383 err = PTR_ERR(uprobes[i].uprobe);
3384 link->cnt = i;
3385 goto error_unregister;
3386 }
3387 }
3388
3389 err = bpf_link_prime(&link->link, &link_primer);
3390 if (err)
3391		goto error_unregister;
3392
3393 return bpf_link_settle(&link_primer);
3394
3395error_unregister:
3396 bpf_uprobe_unregister(uprobes, link->cnt);
3397
3398error_free:
3399 kvfree(uprobes);
3400 kfree(link);
3401 if (task)
3402 put_task_struct(task);
3403error_path_put:
3404 path_put(&path);
3405 return err;
3406}
3407#else /* !CONFIG_UPROBES */
3408int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3409{
3410 return -EOPNOTSUPP;
3411}
3412static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3413{
3414 return 0;
3415}
3416static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3417{
3418 return 0;
3419}
3420#endif /* CONFIG_UPROBES */
3421
3422__bpf_kfunc_start_defs();
3423
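/*
 * Session programs are invoked on both entry and exit of the probed
 * function; these kfuncs let the program distinguish the two and
 * share per-invocation data between them.
 */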
3424__bpf_kfunc bool bpf_session_is_return(void)
3425{
3426 struct bpf_session_run_ctx *session_ctx;
3427
3428 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3429 return session_ctx->is_return;
3430}
3431
3432__bpf_kfunc __u64 *bpf_session_cookie(void)
3433{
3434 struct bpf_session_run_ctx *session_ctx;
3435
3436 session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
3437 return session_ctx->data;
3438}
3439
3440__bpf_kfunc_end_defs();
3441
3442BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
3443BTF_ID_FLAGS(func, bpf_session_is_return)
3444BTF_ID_FLAGS(func, bpf_session_cookie)
3445BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
3446
3447static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
3448{
3449 if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
3450 return 0;
3451
3452	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
3453 return -EACCES;
3454
3455 return 0;
3456}
3457
3458static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
3459 .owner = THIS_MODULE,
3460 .set = &kprobe_multi_kfunc_set_ids,
3461 .filter = bpf_kprobe_multi_filter,
3462};
3463
3464static int __init bpf_kprobe_multi_kfuncs_init(void)
3465{
3466 return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
3467}
3468
3469late_initcall(bpf_kprobe_multi_kfuncs_init);

typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);

/*
 * The __always_inline is to make sure the compiler doesn't generate
 * indirect calls into the callbacks, which are expensive on some kernel
 * configurations. It allows the compiler to emit direct calls to all
 * the specific callback implementations (copy_user_data_sleepable,
 * copy_user_data_nofault, and so on).
 */
static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u32 doff, u32 size,
						 const void *unsafe_src,
						 copy_fn_t str_copy_fn,
						 struct task_struct *tsk)
{
	struct bpf_dynptr_kern *dst;
	u32 chunk_sz, off;
	void *dst_slice;
	int cnt, err;
	char buf[256];

	dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size);
	if (likely(dst_slice))
		return str_copy_fn(dst_slice, unsafe_src, size, tsk);

	dst = (struct bpf_dynptr_kern *)dptr;
	if (bpf_dynptr_check_off_len(dst, doff, size))
		return -E2BIG;

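	/*
	 * No contiguous slice is available, so bounce the string through an
	 * on-stack buffer, one chunk at a time.
	 */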
	for (off = 0; off < size; off += chunk_sz - 1) {
		chunk_sz = min_t(u32, sizeof(buf), size - off);
		/* str_copy_fn is expected to return the number of copied
		 * bytes, including the NUL terminator. The next iteration
		 * advances off by chunk_sz - 1 so that the NUL is
		 * overwritten.
		 */
		cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk);
		if (cnt < 0)
			return cnt;
		err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0);
		if (err)
			return err;
		if (cnt < chunk_sz || chunk_sz == 1) /* we are done */
			return off + cnt;
	}
	return off;
}

static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u32 doff,
					     u32 size, const void *unsafe_src,
					     copy_fn_t copy_fn, struct task_struct *tsk)
{
	struct bpf_dynptr_kern *dst;
	void *dst_slice;
	char buf[256];
	u32 off, chunk_sz;
	int err;

	dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size);
	if (likely(dst_slice))
		return copy_fn(dst_slice, unsafe_src, size, tsk);

	dst = (struct bpf_dynptr_kern *)dptr;
	if (bpf_dynptr_check_off_len(dst, doff, size))
		return -E2BIG;

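	/*
	 * No contiguous slice is available; copy through an on-stack bounce
	 * buffer in chunks of up to sizeof(buf) bytes.
	 */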
	for (off = 0; off < size; off += chunk_sz) {
		chunk_sz = min_t(u32, sizeof(buf), size - off);
		err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk);
		if (err)
			return err;
		err = __bpf_dynptr_write(dst, doff + off, buf, chunk_sz, 0);
		if (err)
			return err;
	}
	return 0;
}

static __always_inline int copy_user_data_nofault(void *dst, const void *unsafe_src,
						  u32 size, struct task_struct *tsk)
{
	return copy_from_user_nofault(dst, (const void __user *)unsafe_src, size);
}

static __always_inline int copy_user_data_sleepable(void *dst, const void *unsafe_src,
						    u32 size, struct task_struct *tsk)
{
	int ret;

	if (!tsk) { /* Read from the current task */
		ret = copy_from_user(dst, (const void __user *)unsafe_src, size);
		if (ret)
			return -EFAULT;
		return 0;
	}

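	/* access_process_vm() returns the number of bytes read; treat a
	 * short read as a fault.
	 */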
	ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0);
	if (ret != size)
		return -EFAULT;
	return 0;
}

static __always_inline int copy_kernel_data_nofault(void *dst, const void *unsafe_src,
						    u32 size, struct task_struct *tsk)
{
	return copy_from_kernel_nofault(dst, unsafe_src, size);
}

static __always_inline int copy_user_str_nofault(void *dst, const void *unsafe_src,
						 u32 size, struct task_struct *tsk)
{
	return strncpy_from_user_nofault(dst, (const void __user *)unsafe_src, size);
}

static __always_inline int copy_user_str_sleepable(void *dst, const void *unsafe_src,
						   u32 size, struct task_struct *tsk)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	if (tsk) {
		ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_src, dst, size, 0);
	} else {
		ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1);
		/* strncpy_from_user() does not guarantee NUL termination */
		if (ret >= 0)
			((char *)dst)[ret] = '\0';
	}

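	/* On success, return the copied length including the NUL terminator,
	 * which is what __bpf_dynptr_copy_str() expects from its callback.
	 */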
	if (ret < 0)
		return ret;
	return ret + 1;
}

static __always_inline int copy_kernel_str_nofault(void *dst, const void *unsafe_src,
						   u32 size, struct task_struct *tsk)
{
	return strncpy_from_kernel_nofault(dst, unsafe_src, size);
}

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
				     u64 value)
{
	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
		return -EINVAL;

	return bpf_send_signal_common(sig, type, task, value);
}
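
/*
 * Example usage from a BPF program (sketch; signal number and cookie
 * value are illustrative), sending SIGUSR1 to a specific task and
 * passing a caller-chosen value along with the signal:
 *
 *	bpf_send_signal_task(task, SIGUSR1, PIDTYPE_PID, 0xcafe);
 */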

__bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u32 off,
					   u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_nofault, NULL);
}

__bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u32 off,
					     u32 size, const void *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign,
				 copy_kernel_data_nofault, NULL);
}

__bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u32 off,
					       u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_nofault, NULL);
}

__bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u32 off,
						 u32 size, const void *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign,
				     copy_kernel_str_nofault, NULL);
}

__bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u32 off,
					  u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_sleepable, NULL);
}

__bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u32 off,
					      u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_sleepable, NULL);
}

__bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u32 off,
					       u32 size, const void __user *unsafe_ptr__ign,
					       struct task_struct *tsk)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_sleepable, tsk);
}

__bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u32 off,
						   u32 size, const void __user *unsafe_ptr__ign,
						   struct task_struct *tsk)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_sleepable, tsk);
}
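
/*
 * Example: filling a ringbuf-backed dynptr with a user string from a
 * sleepable BPF program (sketch; ringbuf map "rb", buffer size, and
 * error handling are illustrative):
 *
 *	struct bpf_dynptr dptr;
 *
 *	bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &dptr);
 *	bpf_copy_from_user_str_dynptr(&dptr, 0, 64, user_ptr);
 *	bpf_ringbuf_submit_dynptr(&dptr, 0);
 */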

__bpf_kfunc_end_defs();