bpf: Remove inline from bpf_do_trace_printk
kernel/trace/bpf_trace.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

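/*
 * Example (illustrative, not part of the original file): how a perf kprobe
 * dispatcher might honor the return contract documented above. The handler
 * name and body are hypothetical; only trace_call_bpf() is real.
 *
 *	static void kprobe_perf_func(struct trace_event_call *call, void *regs)
 *	{
 *		if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *			return;		// prog returned 0: filter event out
 *		// otherwise record the event into the perf ring buffer
 *	}
 */
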
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

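/*
 * Example (illustrative): a BPF kprobe program forcing an error return from
 * a function on the error-injection allowlist. Assumes libbpf's
 * bpf_helpers.h on the BPF program side; the target is only an example.
 *
 *	SEC("kprobe/should_failslab")
 *	int override_slab(struct pt_regs *ctx)
 *	{
 *		bpf_override_return(ctx, -ENOMEM);	// fail the allocation
 *		return 0;
 *	}
 */
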
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

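/*
 * Example (illustrative): reading a NUL-terminated kernel string from a
 * kprobe program with the helper above. Assumes libbpf on the BPF program
 * side; the pointer source is only an example.
 *
 *	char name[16];
 *	const char *kptr = (const char *)PT_REGS_PARM1(ctx);
 *	long n = bpf_probe_read_kernel_str(name, sizeof(name), kptr);
 *	if (n > 0)	// n includes the trailing NUL on success
 *		bpf_printk("read %ld bytes: %s", n, name);
 */
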
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

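/*
 * Example (illustrative): patching a buffer in the traced process from a
 * kprobe program. The destination must be user memory of the current task;
 * the pointer source below is only an example.
 *
 *	char ok[] = "y";
 *	void *uptr = (void *)PT_REGS_PARM2(ctx);	// user pointer arg
 *	bpf_probe_write_user(uptr, ok, sizeof(ok));
 */
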
static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE 1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append null for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

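/*
 * Example (illustrative): the BPF program side of the helper above, via
 * libbpf's bpf_printk() convenience macro. The section name is only an
 * example.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_exec(void *ctx)
 *	{
 *		bpf_printk("execve from pid %d",
 *			   bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 *
 * The text is emitted through the bpf_trace/bpf_trace_printk trace event
 * and can be read from /sys/kernel/debug/tracing/trace_pipe.
 */
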
#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* At most we can have MAX_SEQ_PRINTF_VARARGS parameters; just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST(bpf_seq_printf_btf_ids)
BTF_ID(struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
	.btf_id = bpf_seq_printf_btf_ids,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

BTF_ID_LIST(bpf_seq_write_btf_ids)
BTF_ID(struct, seq_file)

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.btf_id = bpf_seq_write_btf_ids,
};

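/*
 * Example (illustrative): a BPF_TRACE_ITER program printing one line per
 * task through bpf_seq_printf(). SEC("iter/task"), the ctx type and the
 * BPF_SEQ_PRINTF macro come from libbpf/vmlinux.h; names are examples.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%8d %s\n",
 *				       task->pid, task->comm);
 *		return 0;
 *	}
 */
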
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

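/*
 * Example (illustrative): sampling a hardware counter for the current CPU
 * from a BPF program. Assumes a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "counters" (hypothetical) was populated from user space with perf fds.
 *
 *	u64 cycles = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 */
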
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

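/*
 * Example (illustrative): streaming an event record to user space through
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY map named "events" (hypothetical).
 *
 *	struct event { u32 pid; char comm[16]; } e = {};
 *
 *	e.pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_get_current_comm(e.comm, sizeof(e.comm));
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */
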
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

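/*
 * Example (illustrative): signalling the current process from a tracing
 * program. bpf_send_signal() targets the whole thread group (TGID),
 * bpf_send_signal_thread() only the current thread; suspect_pid is a
 * hypothetical value the program computed earlier.
 *
 *	if (pid == suspect_pid)
 *		bpf_send_signal(SIGKILL);
 */
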
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

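/*
 * Example (illustrative): the intended two-step use of the helper above
 * from a perf_event BPF program: query the record size first, then copy.
 *
 *	struct perf_branch_entry entries[32];
 *	int sz = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, entries,
 *					     sizeof(entries), 0);
 */
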
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
e87c6bc3
YS
1742
1743static DEFINE_MUTEX(bpf_event_mutex);
1744
c8c088ba
YS
1745#define BPF_TRACE_MAX_PROGS 64
1746
e87c6bc3
YS
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the kprobe is placed on the
	 * function entry, and only if the target function is on the
	 * error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* publish the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
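	/*
	 * Readers may still be iterating old_array under RCU;
	 * bpf_prog_array_free() defers the actual free with an RCU grace
	 * period, so dropping it right after publishing new_array is safe.
	 */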
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it explicitly since that case is
	 * handled gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
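
/*
 * For reference, a rough hand expansion of BPF_TRACE_DEFN_x(2):
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */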

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
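	 * (each argument occupies one u64 ctx slot, so e.g. a tracepoint
	 * with three arguments exposes a 24-byte ctx, and a program whose
	 * max_ctx_offset exceeds 24 is rejected)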
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
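/*
 * Back end of the bpf(BPF_TASK_FD_QUERY) command: report what the given
 * perf event (tracepoint, kprobe or uprobe) is attached to.
 */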
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
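/*
 * bpf_send_signal() may run in contexts (e.g. NMI) where raising a signal
 * directly is unsafe, so delivery is deferred via irq_work; set up one
 * irq_work per possible CPU at boot.
 */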
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */