// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;
	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;
	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto fail;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		goto fail;

	return ret;
fail:
	memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

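/*
 * Usage sketch (editor's illustration, not part of the original file): from
 * a BPF tracing program, the probe_read helpers above are typically used
 * like this, with the destination zeroed on failure as implemented in the
 * *_common() functions:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char comm[16] = {};
 *
 *	bpf_probe_read_kernel(comm, sizeof(comm), &task->comm);
 */
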
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				  size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

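/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * BPF program calls the helper above with a stack-resident format string,
 * e.g.:
 *
 *	char fmt[] = "pid %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), bpf_get_current_pid_tgid() >> 32);
 *
 * Output lands in the ftrace ring buffer (trace_pipe). At most three
 * arguments and a single %s are accepted, as enforced above.
 */
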
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST(bpf_seq_printf_btf_ids)
BTF_ID(struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
	.btf_id = bpf_seq_printf_btf_ids,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

BTF_ID_LIST(bpf_seq_write_btf_ids)
BTF_ID(struct, seq_file)

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.btf_id = bpf_seq_write_btf_ids,
};

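/*
 * Usage sketch (editor's illustration, not part of the original file):
 * bpf_seq_printf()/bpf_seq_write() are restricted to BPF_TRACE_ITER
 * programs, where the seq_file comes from the iterator context. Under
 * libbpf conventions, roughly:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		char fmt[] = "%d\n";
 *		u64 args[] = { ctx->task ? ctx->task->pid : 0 };
 *
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */
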
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

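/*
 * Usage sketch (editor's illustration, not part of the original file): with
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY populated from user space (here named
 * 'counters', a hypothetical map), a program can sample a counter together
 * with its enabled/running times:
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				  &v, sizeof(v));
 */
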
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

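/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * kprobe program typically streams samples to user space through a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map (here named 'events', hypothetical):
 *
 *	struct event_t data = { ... };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 *
 * The index defaults to the current CPU, so __bpf_perf_event_output()
 * finds an event whose ->oncpu matches.
 */
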
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

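/*
 * Usage sketch (editor's illustration, not part of the original file): with
 * a BPF_MAP_TYPE_CGROUP_ARRAY map (here 'cgrp_map', hypothetical) populated
 * from user space, a program can filter on the current task's cgroup
 * membership:
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) != 1)
 *		return 0;
 */
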
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

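/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * tracing program can signal the process (or thread) it is currently
 * executing in:
 *
 *	bpf_send_signal(10);		(SIGUSR1, to the whole thread group)
 *	bpf_send_signal_thread(10);	(to the current thread only)
 */
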
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

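/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * minimal kprobe program served by the ops above, written under libbpf
 * conventions (the attach point is an arbitrary example):
 *
 *	SEC("kprobe/do_sys_open")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		char fmt[] = "open by %d\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 */
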
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -EINVAL;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func = bpf_read_branch_records,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

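/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * perf_event program can query the branch-record size with a NULL buffer
 * (ARG_PTR_TO_MEM_OR_NULL above permits this) and then copy the entries:
 *
 *	int sz = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *
 *	bpf_read_branch_records(ctx, buf, sizeof(buf), 0);
 *
 * where 'buf' is sized as a multiple of sizeof(struct perf_branch_entry);
 * in a real program the copy size must be a constant the verifier can see.
 */
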
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

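/*
 * Usage sketch (editor's illustration, not part of the original file): raw
 * tracepoint programs receive the tracepoint's raw u64 arguments, mirroring
 * its TP_PROTO; for sched_switch that is (preempt, prev, next), so e.g.:
 *
 *	SEC("raw_tp/sched_switch")
 *	int on_switch(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		struct task_struct *next = (void *)ctx->args[2];
 *		...
 *	}
 */
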
const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

1711static DEFINE_MUTEX(bpf_event_mutex);
1712
c8c088ba
YS
1713#define BPF_TRACE_MAX_PROGS 64
1714
e87c6bc3
YS
1715int perf_event_attach_bpf_prog(struct perf_event *event,
1716 struct bpf_prog *prog)
1717{
e672db03 1718 struct bpf_prog_array *old_array;
e87c6bc3
YS
1719 struct bpf_prog_array *new_array;
1720 int ret = -EEXIST;
1721
9802d865 1722 /*
b4da3340
MH
1723 * Kprobe override only works if they are on the function entry,
1724 * and only if they are on the opt-in list.
9802d865
JB
1725 */
1726 if (prog->kprobe_override &&
b4da3340 1727 (!trace_kprobe_on_func_entry(event->tp_event) ||
9802d865
JB
1728 !trace_kprobe_error_injectable(event->tp_event)))
1729 return -EINVAL;
1730
e87c6bc3
YS
1731 mutex_lock(&bpf_event_mutex);
1732
1733 if (event->prog)
07c41a29 1734 goto unlock;
e87c6bc3 1735
e672db03 1736 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
c8c088ba
YS
1737 if (old_array &&
1738 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1739 ret = -E2BIG;
1740 goto unlock;
1741 }
1742
e87c6bc3
YS
1743 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1744 if (ret < 0)
07c41a29 1745 goto unlock;
e87c6bc3
YS
1746
1747 /* set the new array to event->tp_event and set event->prog */
1748 event->prog = prog;
1749 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1750 bpf_prog_array_free(old_array);
1751
07c41a29 1752unlock:
e87c6bc3
YS
1753 mutex_unlock(&bpf_event_mutex);
1754 return ret;
1755}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len == 0, which
	 * is the case when the user only wants to read uquery->prog_cnt.
	 * There is no need to check for it here since that case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
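
/*
 * Example (editor's sketch): the PERF_EVENT_IOC_QUERY_BPF ioctl is the
 * userspace entry point for perf_event_query_prog_array(); ids_len may
 * be 0 to query only the count:
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 4 * sizeof(__u32));
 *	query->ids_len = 4;
 *	if (ioctl(ev_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		printf("%u programs attached\n", query->prog_cnt);
 */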

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}
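
/*
 * Note (editor's illustration): __start__bpf_raw_tp/__stop__bpf_raw_tp
 * bound a linker section holding one struct bpf_raw_event_map per
 * built-in tracepoint, so the lookup above is a linear scan; e.g.
 * bpf_get_raw_tracepoint("sched_switch") returns the entry whose
 * btp->tp->name matches, falling back to loaded modules otherwise.
 */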

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) BPF_PROG_RUN(prog, args);
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
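
/*
 * Example (editor's illustration): BPF_TRACE_DEFN_x(2) above expands,
 * via REPEAT/SARG/COPY, to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */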

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * Check that the program doesn't access arguments beyond what's
	 * available in this tracepoint.
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
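
/*
 * Example (editor's sketch): bpf_probe_register() is reached from the
 * BPF_RAW_TRACEPOINT_OPEN command of the bpf() syscall, roughly:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	int tp_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			    &attr, sizeof(attr));
 *	// closing tp_fd detaches via bpf_probe_unregister()
 */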

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
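
/*
 * Example (editor's sketch): bpf_get_perf_event_info() backs the
 * BPF_TASK_FD_QUERY bpf() command, which lets tools map a perf event fd
 * in a target task back to the attached program:
 *
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = perf_event_fd;
 *	attr.task_fd_query.buf = (__u64)(unsigned long)name_buf;
 *	attr.task_fd_query.buf_len = sizeof(name_buf);
 *	err = syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
 *	// on success attr.task_fd_query.prog_id, .fd_type,
 *	// .probe_offset and .probe_addr are filled in
 */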

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
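
/*
 * Note (editor's illustration): the per-cpu irq_work initialized above
 * lets the bpf_send_signal() helper defer signal delivery when it is
 * invoked from a context where signalling directly is unsafe (e.g.
 * NMI); do_bpf_send_signal() then delivers the signal from irq_work
 * context.
 */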

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */