// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array that bpf_prog_array_valid() fetched was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, then we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

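/*
 * Illustrative sketch (not part of this file): kprobe_perf_func() in
 * trace_kprobe.c is a typical caller; it drops the event whenever the
 * attached programs return 0:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;
 */
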
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

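/*
 * Illustrative sketch (not part of this file): a BPF kprobe program may use
 * bpf_override_return() to force an error return from a function on the
 * error-injection opt-in list. The attach point below is an assumption for
 * the example.
 *
 *	SEC("kprobe/should_failslab")
 *	int override(struct pt_regs *ctx)
 *	{
 *		// make the probed function report failure
 *		bpf_override_return(ctx, -ENOMEM);
 *		return 0;
 *	}
 */
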
BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = probe_user_read(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);

	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
			     const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
	      probe_kernel_read_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
				 const bool compat)
{
	int ret = security_locked_down(LOCKDOWN_BPF_READ);

	if (unlikely(ret < 0))
		goto out;
	/*
	 * The strncpy_from_unsafe_*() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
}

static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

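/*
 * Illustrative sketch (not part of this file): BPF programs are expected to
 * use the address-space-specific helpers above instead of the legacy
 * bpf_probe_read(). "task" and "uptr" are hypothetical pointers.
 *
 *	char comm[16];
 *	u64 val;
 *
 *	// comm lives in kernel memory
 *	bpf_probe_read_kernel(comm, sizeof(comm), task->comm);
 *	// uptr points into user memory
 *	bpf_probe_read_user(&val, sizeof(val), uptr);
 */
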
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

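/*
 * Illustrative sketch (not part of this file): typical bpf_trace_printk()
 * usage from a BPF program, within the specifier limits enforced above;
 * output appears in the tracing trace_pipe.
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt),
 *			 bpf_get_current_pid_tgid() >> 32, comm);
 */
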
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

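/*
 * Illustrative sketch (not part of this file): reading a counter together
 * with its enabled/running times allows scaling in the BPF program.
 * "counters" is a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				       &v, sizeof(v)))
 *		scaled = v.counter * v.enabled / v.running;
 */
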
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

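/*
 * Illustrative sketch (not part of this file): streaming an event record to
 * userspace through a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY map named
 * "events".
 *
 *	struct event { u32 pid; u64 ts; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */
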
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

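/*
 * Illustrative sketch (not part of this file): cgroup-based filtering in a
 * BPF program. "cgroup_map" is a hypothetical BPF_MAP_TYPE_CGROUP_ARRAY map
 * whose slot 0 was populated from userspace.
 *
 *	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
 *		return 0;	// current task is outside the cgroup
 */
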
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be
	 * in a sound condition and kernel memory access must be
	 * permitted in order to send a signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (in_nmi()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

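/*
 * Illustrative sketch (not part of this file): a tracing program can signal
 * the task that triggered the event, e.g. to make it dump state.
 * "target_pid" is a hypothetical filter value.
 *
 *	if (bpf_get_current_pid_tgid() >> 32 == target_pid)
 *		bpf_send_signal(SIGUSR1);
 */
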
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
#endif
	default:
		return raw_tp_prog_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto		= tracing_prog_func_proto,
	.is_valid_access	= tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

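/*
 * Illustrative sketch (not part of this file): for a perf event program, a
 * context load such as
 *
 *	u64 period = ctx->sample_period;
 *
 * is rewritten by pe_prog_convert_ctx_access() into two loads: first the
 * bpf_perf_event_data_kern::data pointer, then perf_sample_data::period
 * through it.
 */
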
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if the function is on the error-injection opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

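/*
 * Illustrative sketch (not part of this file): userspace reaches
 * perf_event_query_prog_array() through the PERF_EVENT_IOC_QUERY_BPF ioctl
 * on the perf event fd. Error handling omitted.
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + sizeof(__u32) * 64);
 *	query->ids_len = 64;
 *	ioctl(perf_event_fd, PERF_EVENT_IOC_QUERY_BPF, query);
 *	// query->prog_cnt: number of attached programs,
 *	// query->ids[]:    their BPF program IDs
 */
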
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

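/*
 * Illustrative sketch (not part of this file): BPF_TRACE_DEFN_x(2) above
 * expands to roughly
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */
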
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */