bpf: Move to generic BTF show support, apply it to seq files/strings
kernel/trace/bpf_trace.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

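/*
 * Look up a raw tracepoint exported by a loaded module by name. On a match,
 * try_module_get() pins the owning module so the tracepoint cannot go away
 * while a BPF program is attached to it.
 */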
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, we skip the prog_array at the risk of missing events
	 * that were added in between this check and the rcu_dereference(),
	 * which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

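/*
 * Illustrative BPF-side usage of the helper above (hypothetical program,
 * not part of this file):
 *
 *	char comm[16];
 *	long ret = bpf_probe_read_user(comm, sizeof(comm), user_comm_ptr);
 *
 * Because the destination is zeroed on failure, callers that ignore the
 * return value never observe uninitialized bytes.
 */
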
static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

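/*
 * The kernel-space variants below additionally consult the lockdown LSM:
 * with LOCKDOWN_BPF_READ in force, security_locked_down() fails the probe
 * up front and the destination buffer is cleared before returning.
 */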
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the
	 * return code altogether don't copy garbage; otherwise the length
	 * of the string is returned, which can be used for
	 * bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

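/*
 * The legacy bpf_probe_read()/bpf_probe_read_str() helpers guessed whether
 * a pointer referenced user or kernel memory by comparing it against
 * TASK_SIZE. That heuristic is only sound where the two address ranges
 * cannot overlap, so these compat variants exist only under
 * CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE.
 */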
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

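/*
 * bpf_probe_write_user() can silently corrupt user memory; its proto is
 * only handed out to CAP_SYS_ADMIN holders, and every program load using
 * it is logged below.
 */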
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE 1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append null for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)						\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)					\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)					\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)					\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

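/*
 * Illustrative BPF-side usage (hypothetical program, not part of this
 * file):
 *
 *	char fmt[] = "pid %d tgid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, tgid);
 *
 * The formatted string is emitted through the bpf_trace/bpf_trace_printk
 * tracepoint, which bpf_get_trace_printk_proto() force-enables below.
 */
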
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), the user has expressed the intent
	 * to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters, just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

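/*
 * Illustrative BPF iterator usage (hypothetical program, not part of this
 * file; BPF_SEQ_PRINTF is assumed to be a userspace convenience wrapper
 * around bpf_seq_printf()):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */
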
BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

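/*
 * Both seq_file helpers above take a BTF-typed struct seq_file pointer and
 * are only handed out to programs with expected_attach_type ==
 * BPF_TRACE_ITER; see tracing_prog_func_proto() further down.
 */
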
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

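/*
 * bpf_event_output() below is the variant used by callers that supply
 * their own context-copy callback (e.g. the skb/xdp output helpers);
 * unlike bpf_perf_event_output() it takes no pt_regs argument and instead
 * snapshots the caller's registers via perf_fetch_caller_regs().
 */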
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

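/*
 * bpf_d_path() is only made available to programs whose attach point is
 * one of the functions in the allowlist above; bpf_d_path_allowed()
 * enforces this through the proto's ->allowed callback.
 */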
static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	default:
		return NULL;
	}
}

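/*
 * The table above is the helper set common to all tracing program types.
 * The per-type proto functions below (kprobe, tracepoint, perf event, raw
 * tracepoint, tracing) layer their specific helpers on top and fall back
 * to bpf_tracing_func_proto() in their default cases.
 */
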
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

1589
5e43f899
AI
1590static const struct bpf_func_proto *
1591raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
c4f6699d
AS
1592{
1593 switch (func_id) {
1594 case BPF_FUNC_perf_event_output:
1595 return &bpf_perf_event_output_proto_raw_tp;
1596 case BPF_FUNC_get_stackid:
1597 return &bpf_get_stackid_proto_raw_tp;
c195651e
YS
1598 case BPF_FUNC_get_stack:
1599 return &bpf_get_stack_proto_raw_tp;
c4f6699d 1600 default:
fc611f47 1601 return bpf_tracing_func_proto(func_id, prog);
c4f6699d
AS
1602 }
1603}
1604
const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
	.test_run = bpf_prog_test_run_raw_tp,
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

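/*
 * Narrow loads from bpf_perf_event_data's sample_period and addr fields
 * are allowed below; a misaligned 8-byte access is tolerated only on
 * 32-bit architectures, where the long-sized pt_regs members leave u64
 * fields aligned to 4 bytes.
 */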
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

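/*
 * pe_prog_convert_ctx_access() rewrites each access to struct
 * bpf_perf_event_data into a two-step load through the kernel-side
 * bpf_perf_event_data_kern: first fetch the data (or regs) pointer, then
 * load the requested field from behind it.
 */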
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

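/*
 * Attach and detach follow the usual RCU publication pattern: readers walk
 * event->tp_event->prog_array under rcu_read_lock(), while writers
 * serialize on bpf_event_mutex, build a modified copy of the array with
 * bpf_prog_array_copy() and publish it with rcu_assign_pointer() before
 * releasing the old one.
 */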
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the kprobe is on the function
	 * entry, and only if the function is on the error-injection
	 * opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* publish the new array on event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

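/*
 * Userspace reaches this through the PERF_EVENT_IOC_QUERY_BPF ioctl on a
 * perf event fd.  A minimal sketch of a caller (error handling omitted):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	err = ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query);
 *
 * On success, query->prog_cnt holds the number of attached programs and
 * query->ids[] their ids; a too-small ids_len is reported with -ENOSPC.
 */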
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it here since that case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

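/*
 * All bpf_trace_run1() ... bpf_trace_run12() entry points funnel through
 * here: the program runs in the tracepoint's non-sleepable context, under
 * rcu_read_lock(), and its return value is ignored.
 */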
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

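/*
 * REPEAT(X, FN, DL, ...) applies FN to the first X items of the sequence,
 * splicing in the delimiter DL between applications, e.g.:
 *
 *	REPEAT(3, SARG, __DL_COM, __SEQ_0_11)
 *		=> u64 arg0, u64 arg1, u64 arg2
 *	REPEAT(3, COPY, __DL_SEM, __SEQ_0_11)
 *		=> args[0] = arg0; args[1] = arg1; args[2] = arg2
 */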
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
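/*
 * So BPF_TRACE_DEFN_x(2), for instance, expands to roughly:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * giving the tracepoint glue in include/trace/bpf_probe.h one typed entry
 * point per argument count.
 */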
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

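/*
 * A raw tracepoint exposes its arguments to the program as an array of
 * u64s, so a tracepoint with e.g. three arguments offers at most
 * 3 * 8 == 24 bytes of context; a program whose verified max_ctx_offset
 * exceeds that is rejected here at attach time, as is one that writes
 * past the tracepoint's writable_size.
 */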
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that the program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

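/*
 * Backs the BPF_TASK_FD_QUERY command of the bpf(2) syscall: given a perf
 * event fd, report the id of the attached program and the tracepoint,
 * kprobe or uprobe the event is attached to.
 */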
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */