// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different),
		 * don't send the kprobe event into the ring buffer, and
		 * return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, the call sites do a bpf_prog_array_valid()
	 * check first to see whether call->prog_array is empty or not,
	 * which is a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
	 * we enter trace_call_bpf() and do the proper rcu_dereference()
	 * under the RCU lock; if prog_array turns out to be NULL, we bail
	 * out. Conversely, if the fetched pointer was NULL, the call site
	 * skips the prog_array and accepts the risk of missing an event
	 * if the array was updated between that check and the
	 * rcu_dereference().
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

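/*
 * Helpers for safely probing memory from a BPF program. The _user variants
 * copy from user addresses and the _kernel variants from kernel addresses,
 * both with page faults disabled. On failure the destination buffer is
 * zeroed, so programs that ignore the return value never observe stale
 * stack contents.
 */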
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

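/*
 * On architectures where user and kernel address spaces do not overlap,
 * the legacy bpf_probe_read() and bpf_probe_read_str() helpers can still
 * be offered: each access is routed to the user or kernel variant based
 * on whether the pointer lies below TASK_SIZE.
 */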
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

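/*
 * Unlike the probe_read helpers above, bpf_probe_write_user() mutates user
 * memory. Access to it is gated in bpf_get_probe_write_proto() below, which
 * requires CAP_SYS_ADMIN and emits a ratelimited warning identifying the
 * installing task.
 */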
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

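/*
 * Copy a string argument for bpf_trace_printk(). The format pointer type
 * selects the source address space: 'k' reads from a kernel address, 'u'
 * from a user address, and 's' is the legacy %s which, on architectures
 * with non-overlapping address spaces, is disambiguated by comparing the
 * pointer against TASK_SIZE.
 */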
static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

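/*
 * bpf_trace_printk() renders into a single static buffer, serialized by
 * trace_printk_lock, and hands the result to the bpf_trace_printk
 * tracepoint rather than writing to the trace buffer directly.
 */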
static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE	1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append null for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

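/*
 * Typical use from a BPF program (illustrative sketch only):
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * The format string must live on the BPF stack, at most three arguments
 * are accepted, and only a single %s may appear, per the checks above.
 */
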
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible that a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), the user has expressed the intent
	 * to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

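/*
 * bpf_seq_printf() and bpf_seq_write() print into a seq_file and are meant
 * for BPF iterator programs; tracing_prog_func_proto() below only exposes
 * them when the program's expected_attach_type is BPF_TRACE_ITER.
 */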
#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}


			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* At most MAX_SEQ_PRINTF_VARARGS parameters are possible, so just
	 * give all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

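/*
 * Read a counter from a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot. The flags value
 * encodes the array index; BPF_F_CURRENT_CPU selects the slot belonging to
 * the executing CPU.
 */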
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

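/*
 * Common output path for the perf_event_output helpers: look up the perf
 * event for the selected array slot, check that it is a
 * PERF_COUNT_SW_BPF_OUTPUT software event bound to the current CPU, and
 * emit the sample.
 */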
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

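/*
 * Like bpf_get_current_task(), but the return value carries BTF type
 * information (a pointer to struct task_struct), letting the verifier
 * type-check subsequent dereferences.
 */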
BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)

static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_get_current_btf_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

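/*
 * bpf_send_signal() and bpf_send_signal_thread() deliver a signal to the
 * current task, process-wide (PIDTYPE_TGID) or to the calling thread only
 * (PIDTYPE_PID). With IRQs disabled the delivery is deferred to irq_work,
 * since group_send_sig_info() may take locks that are not safe in that
 * context.
 */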
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

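/*
 * bpf_d_path() resolves a struct path to a pathname. bpf_d_path_allowed()
 * restricts it to a BTF allowlist of attach points (plus sleepable LSM
 * hooks) where the path argument is known to be safe to dereference.
 */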
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

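/*
 * Helpers for printing kernel data through BTF type information:
 * bpf_snprintf_btf() renders into a caller-supplied buffer, while
 * bpf_seq_printf_btf() above writes to a seq_file. Both validate their
 * flags and resolve the type id via bpf_btf_printf_prepare().
 */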
#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

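/*
 * Base helper set shared by all tracing program types. The per-type
 * *_func_proto() functions below handle their own specific helpers first
 * and fall back to this for everything else.
 */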
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func		= bpf_read_branch_records,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

958a3f2d 1738const struct bpf_func_proto *
f1b9509c
AS
1739tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1740{
1741 switch (func_id) {
1742#ifdef CONFIG_NET
1743 case BPF_FUNC_skb_output:
1744 return &bpf_skb_output_proto;
d831ee84
EC
1745 case BPF_FUNC_xdp_output:
1746 return &bpf_xdp_output_proto;
af7ec138
YS
1747 case BPF_FUNC_skc_to_tcp6_sock:
1748 return &bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
1749 case BPF_FUNC_skc_to_tcp_sock:
1750 return &bpf_skc_to_tcp_sock_proto;
1751 case BPF_FUNC_skc_to_tcp_timewait_sock:
1752 return &bpf_skc_to_tcp_timewait_sock_proto;
1753 case BPF_FUNC_skc_to_tcp_request_sock:
1754 return &bpf_skc_to_tcp_request_sock_proto;
0d4fad3e
YS
1755 case BPF_FUNC_skc_to_udp6_sock:
1756 return &bpf_skc_to_udp6_sock_proto;
8e4597c6
MKL
1757 case BPF_FUNC_sk_storage_get:
1758 return &bpf_sk_storage_get_tracing_proto;
1759 case BPF_FUNC_sk_storage_delete:
1760 return &bpf_sk_storage_delete_tracing_proto;
b60da495
FR
1761 case BPF_FUNC_sock_from_file:
1762 return &bpf_sock_from_file_proto;
f1b9509c 1763#endif
492e639f
YS
1764 case BPF_FUNC_seq_printf:
1765 return prog->expected_attach_type == BPF_TRACE_ITER ?
1766 &bpf_seq_printf_proto :
1767 NULL;
1768 case BPF_FUNC_seq_write:
1769 return prog->expected_attach_type == BPF_TRACE_ITER ?
1770 &bpf_seq_write_proto :
1771 NULL;
eb411377
AM
1772 case BPF_FUNC_seq_printf_btf:
1773 return prog->expected_attach_type == BPF_TRACE_ITER ?
1774 &bpf_seq_printf_btf_proto :
1775 NULL;
6e22ab9d
JO
1776 case BPF_FUNC_d_path:
1777 return &bpf_d_path_proto;
f1b9509c
AS
1778 default:
1779 return raw_tp_prog_func_proto(func_id, prog);
1780 }
1781}
1782
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

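/*
 * Illustrative sketch (not part of this file): from the program's point of
 * view the context is a struct bpf_perf_event_data; the two functions above
 * transparently remap its field loads onto the kernel-internal
 * struct bpf_perf_event_data_kern.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period; // rewritten: load ->data, then ->period
 *		unsigned long ip = ctx->regs.ip;   // default case; x86 pt_regs layout assumed
 *
 *		return 0;
 *	}
 */
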
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is at function entry,
	 * and only if the function is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

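/*
 * Illustrative sketch (not part of this file): user space reaches the attach
 * path above by pairing a tracepoint perf event with a BPF program fd via
 * ioctl(). Error handling elided; prog_fd is assumed to refer to a loaded
 * program of a compatible type.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_TRACEPOINT,
 *		.size = sizeof(attr),
 *		.config = tracepoint_id, // from .../tracing/events/.../id
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0);
 */
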
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
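
/*
 * Illustrative sketch (not part of this file): the matching user-space query
 * uses PERF_EVENT_IOC_QUERY_BPF with a variable-length buffer. Requires
 * CAP_PERFMON (or CAP_SYS_ADMIN); see the perfmon_capable() check above.
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 16 * sizeof(__u32));
 *	q->ids_len = 16;
 *	if (!ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, q))
 *		printf("%u program(s) attached\n", q->prog_cnt);
 */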

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

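/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */
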
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * Check that the program doesn't access arguments beyond what's
	 * available in this tracepoint.
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

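/*
 * Note (illustrative, not from the original source): the max_ctx_offset
 * check above is what rejects, at attach time, a raw_tp program that reads
 * ctx->args[N] for a tracepoint with fewer than N+1 arguments. The verifier
 * only bounds accesses by MAX_BPF_FUNC_ARGS (see
 * raw_tp_prog_is_valid_access()); the per-tracepoint argument count is not
 * known until the program is attached here.
 */
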
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

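/*
 * Illustrative sketch (not part of this file): bpf_get_perf_event_info()
 * backs the BPF_TASK_FD_QUERY bpf(2) command, e.g.:
 *
 *	union bpf_attr attr = {};
 *	char name[256];
 *
 *	attr.task_fd_query.pid = target_pid; // task owning the perf event fd
 *	attr.task_fd_query.fd = target_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)name;
 *	attr.task_fd_query.buf_len = sizeof(name);
 *	syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *	// on success: attr.task_fd_query.prog_id, .fd_type, .probe_offset, ...
 */
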
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */