// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"
#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference() which is accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
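
/*
 * Call-site sketch (cf. kprobe_perf_func() in trace_kprobe.c): callers do
 * the cheap bpf_prog_array_valid() check described above first, and only
 * then pay for the RCU dereference inside trace_call_bpf():
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		(event was filtered out by the program)
 */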

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif
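
/*
 * Usage sketch (a hypothetical BPF program, not part of this file): attached
 * as a kprobe to a function marked with ALLOW_ERROR_INJECTION(), the helper
 * rewrites the return value and makes the probed function return immediately:
 *
 *	SEC("kprobe/should_failslab")		(hypothetical attach point)
 *	int override(struct pt_regs *ctx)
 *	{
 *		bpf_override_return(ctx, -ENOMEM);
 *		return 0;
 *	}
 */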

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
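
/*
 * Usage sketch (hypothetical BPF program): copy a fixed-size buffer from a
 * user-space pointer taken out of the probed context; on failure the helper
 * zeroes the destination, so the buffer is always fully initialized:
 *
 *	char fname[64];
 *	const void *uptr = (const void *)PT_REGS_PARM1(ctx);
 *
 *	if (bpf_probe_read_user(fname, sizeof(fname), uptr) < 0)
 *		return 0;	(treat as unreadable user memory)
 */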

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
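
/*
 * Usage sketch (hypothetical BPF program): NUL-terminated copy from a kernel
 * pointer. On success the return value is the string length including the
 * trailing NUL, which can be fed straight into bpf_perf_event_output():
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char comm[16];
 *	long len;
 *
 *	len = bpf_probe_read_kernel_str(comm, sizeof(comm), task->comm);
 */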

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
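
/*
 * Usage sketch (hypothetical BPF program): patch a buffer in the *current*
 * task's address space, e.g. at a user address captured earlier at syscall
 * entry. Merely loading a program that uses this helper triggers the
 * ratelimited warning emitted above:
 *
 *	long patched = 1;
 *
 *	bpf_probe_write_user((void *)user_buf_addr, &patched, sizeof(patched));
 */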

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				  size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE 1024

static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append null for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					      sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};
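
/*
 * Usage sketch (hypothetical BPF program): at most three arguments, only the
 * specifiers listed above, and at most one %s per format string. The format
 * string must live on the BPF stack and its size includes the NUL:
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */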

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}
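
/*
 * The messages produced this way flow through the bpf_trace_printk trace
 * event enabled above and can be read from
 * /sys/kernel/debug/tracing/trace_pipe.
 */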

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters, just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};
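
/*
 * Usage sketch (hypothetical BPF iterator program): each invocation appends
 * one formatted line to the seq_file backing the iterator; BPF_SEQ_PRINTF()
 * is a convenience wrapper around this helper used by the selftests:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */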
778
908432ca
YS
779static __always_inline int
780get_map_perf_counter(struct bpf_map *map, u64 flags,
781 u64 *value, u64 *enabled, u64 *running)
35578d79 782{
35578d79 783 struct bpf_array *array = container_of(map, struct bpf_array, map);
6816a7ff
DB
784 unsigned int cpu = smp_processor_id();
785 u64 index = flags & BPF_F_INDEX_MASK;
3b1efb19 786 struct bpf_event_entry *ee;
35578d79 787
6816a7ff
DB
788 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
789 return -EINVAL;
790 if (index == BPF_F_CURRENT_CPU)
791 index = cpu;
35578d79
KX
792 if (unlikely(index >= array->map.max_entries))
793 return -E2BIG;
794
3b1efb19 795 ee = READ_ONCE(array->ptrs[index]);
1ca1cc98 796 if (!ee)
35578d79
KX
797 return -ENOENT;
798
908432ca
YS
799 return perf_event_read_local(ee->event, value, enabled, running);
800}
801
802BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
803{
804 u64 value = 0;
805 int err;
806
807 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
35578d79 808 /*
f91840a3
AS
809 * this api is ugly since we miss [-22..-2] range of valid
810 * counter values, but that's uapi
35578d79 811 */
f91840a3
AS
812 if (err)
813 return err;
814 return value;
35578d79
KX
815}
816
62544ce8 817static const struct bpf_func_proto bpf_perf_event_read_proto = {
35578d79 818 .func = bpf_perf_event_read,
1075ef59 819 .gpl_only = true,
35578d79
KX
820 .ret_type = RET_INTEGER,
821 .arg1_type = ARG_CONST_MAP_PTR,
822 .arg2_type = ARG_ANYTHING,
823};
824
908432ca
YS
825BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
826 struct bpf_perf_event_value *, buf, u32, size)
827{
828 int err = -EINVAL;
829
830 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
831 goto clear;
832 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
833 &buf->running);
834 if (unlikely(err))
835 goto clear;
836 return 0;
837clear:
838 memset(buf, 0, size);
839 return err;
840}
841
842static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
843 .func = bpf_perf_event_read_value,
844 .gpl_only = true,
845 .ret_type = RET_INTEGER,
846 .arg1_type = ARG_CONST_MAP_PTR,
847 .arg2_type = ARG_ANYTHING,
848 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
849 .arg4_type = ARG_CONST_SIZE,
850};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
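
/*
 * Usage sketch (hypothetical BPF program): push one event record into the
 * perf ring buffer slot belonging to the current CPU, where "events" is a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map:
 *
 *	struct data_t e = { .pid = pid, .ts = bpf_ktime_get_ns() };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */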

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
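
/*
 * Usage sketch (hypothetical BPF program): signal the current process when
 * some traced condition fires; bpf_send_signal() targets the thread group,
 * bpf_send_signal_thread() only the calling thread:
 *
 *	if ((bpf_get_current_pid_tgid() >> 32) == target_tgid)
 *		bpf_send_signal(SIGUSR1);
 */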

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};
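/*
 * Usage sketch (hypothetical fentry program attached to one of the
 * allowlisted functions above):
 *
 *	SEC("fentry/filp_close")
 *	int BPF_PROG(close_probe, struct file *filp)
 *	{
 *		char path[256];
 *
 *		bpf_d_path(&filp->f_path, path, sizeof(path));
 *		return 0;
 *	}
 */
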
#define BTF_F_ALL	(BTF_F_COMPACT | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return PTR_ERR(*btf);

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};
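
/*
 * Usage sketch (hypothetical BPF program): render a kernel object as a
 * string via its vmlinux BTF type id; bpf_core_type_id_kernel() is the
 * libbpf helper the selftests use to obtain the id:
 *
 *	struct btf_ptr ptr = {
 *		.ptr = skb,
 *		.type_id = bpf_core_type_id_kernel(struct sk_buff),
 *	};
 *	char out[2048];
 *
 *	bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), 0);
 */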

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};
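
/*
 * Usage sketch (hypothetical perf_event program): query the size of the
 * branch stack first, then copy the entries out:
 *
 *	struct perf_branch_entry entries[32];
 *	int total, copied;
 *
 *	total = bpf_read_branch_records(ctx, NULL, 0,
 *					BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 */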

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
	.test_run = bpf_prog_test_run_raw_tp,
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
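/*
 * The access checker and context rewriter below give perf_event programs a
 * stable view of 'struct bpf_perf_event_data'; from the program side the two
 * translated fields read like plain members (a hypothetical sketch, not part
 * of this file):
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;
 *		__u64 addr = ctx->addr;
 *
 *		return period > 0 && addr > 0;
 *	}
 */
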
0515e599 1779static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 1780 const struct bpf_prog *prog,
23994631 1781 struct bpf_insn_access_aux *info)
0515e599 1782{
95da0cdb 1783 const int size_u64 = sizeof(u64);
31fd8581 1784
0515e599
AS
1785 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1786 return false;
1787 if (type != BPF_READ)
1788 return false;
bc23105c
DB
1789 if (off % size != 0) {
1790 if (sizeof(unsigned long) != 4)
1791 return false;
1792 if (size != 8)
1793 return false;
1794 if (off % size != 4)
1795 return false;
1796 }
31fd8581 1797
f96da094
DB
1798 switch (off) {
1799 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
95da0cdb
TQ
1800 bpf_ctx_record_field_size(info, size_u64);
1801 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1802 return false;
1803 break;
1804 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1805 bpf_ctx_record_field_size(info, size_u64);
1806 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
23994631 1807 return false;
f96da094
DB
1808 break;
1809 default:
0515e599
AS
1810 if (size != sizeof(long))
1811 return false;
1812 }
f96da094 1813
0515e599
AS
1814 return true;
1815}
1816
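/*
 * Illustrative sketch, not part of this file: the program-side view of
 * the checks above. Full 8-byte reads of the two u64 fields are
 * accepted (narrow reads too, per bpf_ctx_narrow_access_ok()), while
 * any other offset must be a register-sized load into ctx->regs:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/bpf_perf_event.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;
 *		__u64 addr = ctx->addr;
 *
 *		return period > 4096 && addr != 0;
 *	}
 *
 *	char LICENSE[] SEC("license") = "GPL";
 */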
6b8cc1d1
DB
1817static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1818 const struct bpf_insn *si,
0515e599 1819 struct bpf_insn *insn_buf,
f96da094 1820 struct bpf_prog *prog, u32 *target_size)
0515e599
AS
1821{
1822 struct bpf_insn *insn = insn_buf;
1823
6b8cc1d1 1824 switch (si->off) {
0515e599 1825 case offsetof(struct bpf_perf_event_data, sample_period):
f035a515 1826 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
6b8cc1d1 1827 data), si->dst_reg, si->src_reg,
0515e599 1828 offsetof(struct bpf_perf_event_data_kern, data));
6b8cc1d1 1829 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
f96da094
DB
1830 bpf_target_off(struct perf_sample_data, period, 8,
1831 target_size));
0515e599 1832 break;
95da0cdb
TQ
1833 case offsetof(struct bpf_perf_event_data, addr):
1834 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1835 data), si->dst_reg, si->src_reg,
1836 offsetof(struct bpf_perf_event_data_kern, data));
1837 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1838 bpf_target_off(struct perf_sample_data, addr, 8,
1839 target_size));
1840 break;
0515e599 1841 default:
f035a515 1842 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
6b8cc1d1 1843 regs), si->dst_reg, si->src_reg,
0515e599 1844 offsetof(struct bpf_perf_event_data_kern, regs));
6b8cc1d1
DB
1845 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1846 si->off);
0515e599
AS
1847 break;
1848 }
1849
1850 return insn - insn_buf;
1851}
1852
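/*
 * Worked example for the conversion above: a program load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period));
 *
 * is rewritten into two loads through the kernel-side context,
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data_kern, data));
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
 *
 * i.e. ctx->sample_period becomes ctx->data->period; the default case
 * routes every other offset through ctx->regs the same way, with
 * narrow loads patched up via *target_size.
 */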
7de16e3a 1853const struct bpf_verifier_ops perf_event_verifier_ops = {
f005afed 1854 .get_func_proto = pe_prog_func_proto,
0515e599
AS
1855 .is_valid_access = pe_prog_is_valid_access,
1856 .convert_ctx_access = pe_prog_convert_ctx_access,
1857};
7de16e3a
JK
1858
1859const struct bpf_prog_ops perf_event_prog_ops = {
1860};
e87c6bc3
YS
1861
1862static DEFINE_MUTEX(bpf_event_mutex);
1863
c8c088ba
YS
1864#define BPF_TRACE_MAX_PROGS 64
1865
e87c6bc3
YS
1866int perf_event_attach_bpf_prog(struct perf_event *event,
1867 struct bpf_prog *prog)
1868{
e672db03 1869 struct bpf_prog_array *old_array;
e87c6bc3
YS
1870 struct bpf_prog_array *new_array;
1871 int ret = -EEXIST;
1872
9802d865 1873 /*
b4da3340
MH
 1874	 * Kprobe override only works if the probe is on the function entry,
 1875	 * and only if the target function is on the error-injection opt-in list.
9802d865
JB
1876 */
1877 if (prog->kprobe_override &&
b4da3340 1878 (!trace_kprobe_on_func_entry(event->tp_event) ||
9802d865
JB
1879 !trace_kprobe_error_injectable(event->tp_event)))
1880 return -EINVAL;
1881
e87c6bc3
YS
1882 mutex_lock(&bpf_event_mutex);
1883
1884 if (event->prog)
07c41a29 1885 goto unlock;
e87c6bc3 1886
e672db03 1887 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
c8c088ba
YS
1888 if (old_array &&
1889 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1890 ret = -E2BIG;
1891 goto unlock;
1892 }
1893
e87c6bc3
YS
1894 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1895 if (ret < 0)
07c41a29 1896 goto unlock;
e87c6bc3
YS
1897
 1898	/* publish the new prog_array to event->tp_event and set event->prog */
1899 event->prog = prog;
1900 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1901 bpf_prog_array_free(old_array);
1902
07c41a29 1903unlock:
e87c6bc3
YS
1904 mutex_unlock(&bpf_event_mutex);
1905 return ret;
1906}
1907
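/*
 * Userspace-side sketch, not part of this file (error handling
 * elided): the attach path above is reached via the
 * PERF_EVENT_IOC_SET_BPF ioctl on a perf event fd, with prog_fd
 * assumed to be an already-loaded tracing program:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	static int attach_prog(int perf_fd, int prog_fd)
 *	{
 *		if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
 *			return -1;
 *		return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
 *	}
 */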
1908void perf_event_detach_bpf_prog(struct perf_event *event)
1909{
e672db03 1910 struct bpf_prog_array *old_array;
e87c6bc3
YS
1911 struct bpf_prog_array *new_array;
1912 int ret;
1913
1914 mutex_lock(&bpf_event_mutex);
1915
1916 if (!event->prog)
07c41a29 1917 goto unlock;
e87c6bc3 1918
e672db03 1919 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
e87c6bc3 1920 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
170a7e3e
SY
1921 if (ret == -ENOENT)
1922 goto unlock;
e87c6bc3
YS
1923 if (ret < 0) {
1924 bpf_prog_array_delete_safe(old_array, event->prog);
1925 } else {
1926 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1927 bpf_prog_array_free(old_array);
1928 }
1929
1930 bpf_prog_put(event->prog);
1931 event->prog = NULL;
1932
07c41a29 1933unlock:
e87c6bc3
YS
1934 mutex_unlock(&bpf_event_mutex);
1935}
f371b304 1936
f4e2298e 1937int perf_event_query_prog_array(struct perf_event *event, void __user *info)
f371b304
YS
1938{
1939 struct perf_event_query_bpf __user *uquery = info;
1940 struct perf_event_query_bpf query = {};
e672db03 1941 struct bpf_prog_array *progs;
3a38bb98 1942 u32 *ids, prog_cnt, ids_len;
f371b304
YS
1943 int ret;
1944
031258da 1945 if (!perfmon_capable())
f371b304
YS
1946 return -EPERM;
1947 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1948 return -EINVAL;
1949 if (copy_from_user(&query, uquery, sizeof(query)))
1950 return -EFAULT;
3a38bb98
YS
1951
1952 ids_len = query.ids_len;
1953 if (ids_len > BPF_TRACE_MAX_PROGS)
9c481b90 1954 return -E2BIG;
3a38bb98
YS
1955 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1956 if (!ids)
1957 return -ENOMEM;
1958 /*
1959 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
 1960	 * is required when the user only wants to check uquery->prog_cnt.
1961 * There is no need to check for it since the case is handled
1962 * gracefully in bpf_prog_array_copy_info.
1963 */
f371b304
YS
1964
1965 mutex_lock(&bpf_event_mutex);
e672db03
SF
1966 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1967 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
f371b304
YS
1968 mutex_unlock(&bpf_event_mutex);
1969
3a38bb98
YS
1970 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1971 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1972 ret = -EFAULT;
1973
1974 kfree(ids);
f371b304
YS
1975 return ret;
1976}
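/*
 * Userspace-side sketch, not part of this file: the query above is
 * driven by the PERF_EVENT_IOC_QUERY_BPF ioctl. ids_len is capped at
 * BPF_TRACE_MAX_PROGS (64), and prog_cnt reports the total number of
 * attached programs even when the caller's ids array is too small:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	static void query_progs(int perf_fd)
 *	{
 *		struct perf_event_query_bpf *query;
 *
 *		query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *		if (!query)
 *			return;
 *		query->ids_len = 64;
 *		if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *			printf("%u programs attached\n", query->prog_cnt);
 *		free(query);
 *	}
 */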
c4f6699d
AS
1977
1978extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1979extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1980
a38d1107 1981struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
c4f6699d
AS
1982{
1983 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1984
1985 for (; btp < __stop__bpf_raw_tp; btp++) {
1986 if (!strcmp(btp->tp->name, name))
1987 return btp;
1988 }
a38d1107
MM
1989
1990 return bpf_get_raw_tracepoint_module(name);
1991}
1992
1993void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1994{
1995 struct module *mod = __module_address((unsigned long)btp);
1996
1997 if (mod)
1998 module_put(mod);
c4f6699d
AS
1999}
2000
2001static __always_inline
2002void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2003{
f03efe49 2004 cant_sleep();
c4f6699d 2005 rcu_read_lock();
c4f6699d 2006 (void) BPF_PROG_RUN(prog, args);
c4f6699d
AS
2007 rcu_read_unlock();
2008}
2009
2010#define UNPACK(...) __VA_ARGS__
2011#define REPEAT_1(FN, DL, X, ...) FN(X)
2012#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2013#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2014#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2015#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2016#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2017#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2018#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2019#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2020#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2021#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2022#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2023#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2024
2025#define SARG(X) u64 arg##X
2026#define COPY(X) args[X] = arg##X
2027
2028#define __DL_COM (,)
2029#define __DL_SEM (;)
2030
2031#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2032
2033#define BPF_TRACE_DEFN_x(x) \
2034 void bpf_trace_run##x(struct bpf_prog *prog, \
2035 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2036 { \
2037 u64 args[x]; \
2038 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2039 __bpf_trace_run(prog, args); \
2040 } \
2041 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2042BPF_TRACE_DEFN_x(1);
2043BPF_TRACE_DEFN_x(2);
2044BPF_TRACE_DEFN_x(3);
2045BPF_TRACE_DEFN_x(4);
2046BPF_TRACE_DEFN_x(5);
2047BPF_TRACE_DEFN_x(6);
2048BPF_TRACE_DEFN_x(7);
2049BPF_TRACE_DEFN_x(8);
2050BPF_TRACE_DEFN_x(9);
2051BPF_TRACE_DEFN_x(10);
2052BPF_TRACE_DEFN_x(11);
2053BPF_TRACE_DEFN_x(12);
2054
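/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (modulo whitespace)
 * to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */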
2055static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2056{
2057 struct tracepoint *tp = btp->tp;
2058
2059 /*
 2060	 * Check that the program doesn't access arguments beyond what's
 2061	 * available in this tracepoint.
2062 */
2063 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2064 return -EINVAL;
2065
9df1c28b
MM
2066 if (prog->aux->max_tp_access > btp->writable_size)
2067 return -EINVAL;
2068
c4f6699d
AS
2069 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
2070}
2071
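/*
 * Worked example for the checks above: a tracepoint with num_args == 3
 * exposes 3 * 8 = 24 bytes of u64 arguments. A program whose
 * verifier-recorded max_ctx_offset is 24 (it read args[0..2])
 * registers fine; one that reached past args[2] (max_ctx_offset 32)
 * gets -EINVAL, as does any store beyond the tracepoint's
 * writable_size.
 */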
2072int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2073{
e16ec340 2074 return __bpf_probe_register(btp, prog);
c4f6699d
AS
2075}
2076
2077int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2078{
e16ec340 2079 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
c4f6699d 2080}
41bdc4b4
YS
2081
2082int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2083 u32 *fd_type, const char **buf,
2084 u64 *probe_offset, u64 *probe_addr)
2085{
2086 bool is_tracepoint, is_syscall_tp;
2087 struct bpf_prog *prog;
2088 int flags, err = 0;
2089
2090 prog = event->prog;
2091 if (!prog)
2092 return -ENOENT;
2093
2094 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2095 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2096 return -EOPNOTSUPP;
2097
2098 *prog_id = prog->aux->id;
2099 flags = event->tp_event->flags;
2100 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2101 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2102
2103 if (is_tracepoint || is_syscall_tp) {
2104 *buf = is_tracepoint ? event->tp_event->tp->name
2105 : event->tp_event->name;
2106 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2107 *probe_offset = 0x0;
2108 *probe_addr = 0x0;
2109 } else {
2110 /* kprobe/uprobe */
2111 err = -EOPNOTSUPP;
2112#ifdef CONFIG_KPROBE_EVENTS
2113 if (flags & TRACE_EVENT_FL_KPROBE)
2114 err = bpf_get_kprobe_info(event, fd_type, buf,
2115 probe_offset, probe_addr,
2116 event->attr.type == PERF_TYPE_TRACEPOINT);
2117#endif
2118#ifdef CONFIG_UPROBE_EVENTS
2119 if (flags & TRACE_EVENT_FL_UPROBE)
2120 err = bpf_get_uprobe_info(event, fd_type, buf,
2121 probe_offset,
2122 event->attr.type == PERF_TYPE_TRACEPOINT);
2123#endif
2124 }
2125
2126 return err;
2127}
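/*
 * Userspace-side sketch, not part of this file: this helper backs the
 * BPF_TASK_FD_QUERY command of the bpf(2) syscall, which resolves a
 * perf event fd owned by a task into the attached prog id, fd type,
 * and probe details:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int task_fd_query(int pid, int fd, char *buf, __u32 buf_len)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.task_fd_query.pid = pid;
 *		attr.task_fd_query.fd = fd;
 *		attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *		attr.task_fd_query.buf_len = buf_len;
 *
 *		return syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *	}
 */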
a38d1107 2128
9db1ff0a
YS
2129static int __init send_signal_irq_work_init(void)
2130{
2131 int cpu;
2132 struct send_signal_irq_work *work;
2133
2134 for_each_possible_cpu(cpu) {
2135 work = per_cpu_ptr(&send_signal_work, cpu);
2136 init_irq_work(&work->irq_work, do_bpf_send_signal);
2137 }
2138 return 0;
2139}
2140
2141subsys_initcall(send_signal_irq_work_init);
2142
a38d1107 2143#ifdef CONFIG_MODULES
390e99cf
SF
2144static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2145 void *module)
a38d1107
MM
2146{
2147 struct bpf_trace_module *btm, *tmp;
2148 struct module *mod = module;
2149
2150 if (mod->num_bpf_raw_events == 0 ||
2151 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2152 return 0;
2153
2154 mutex_lock(&bpf_module_mutex);
2155
2156 switch (op) {
2157 case MODULE_STATE_COMING:
2158 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2159 if (btm) {
2160 btm->module = module;
2161 list_add(&btm->list, &bpf_trace_modules);
2162 }
2163 break;
2164 case MODULE_STATE_GOING:
2165 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2166 if (btm->module == module) {
2167 list_del(&btm->list);
2168 kfree(btm);
2169 break;
2170 }
2171 }
2172 break;
2173 }
2174
2175 mutex_unlock(&bpf_module_mutex);
2176
2177 return 0;
2178}
2179
2180static struct notifier_block bpf_module_nb = {
2181 .notifier_call = bpf_event_notify,
2182};
2183
390e99cf 2184static int __init bpf_event_init(void)
a38d1107
MM
2185{
2186 register_module_notifier(&bpf_module_nb);
2187 return 0;
2188}
2189
2190fs_initcall(bpf_event_init);
2191#endif /* CONFIG_MODULES */