kernel/trace/bpf_trace.c
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <asm/kprobes.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() check there to
	 * see whether call->prog_array is empty or not, which is a
	 * heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
	 * was NULL, the prog_array is skipped, at the risk of missing
	 * events when it was updated between that check and the
	 * rcu_dereference(); this is an accepted risk.
	 */
73 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
2541517c
AS
74
75 out:
76 __this_cpu_dec(bpf_prog_active);
77 preempt_enable();
78
79 return ret;
80}
81EXPORT_SYMBOL_GPL(trace_call_bpf);
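
/*
 * Illustrative sketch (not part of the original file): a kprobe handler
 * is expected to consume the return value roughly as follows; the
 * surrounding plumbing is an assumption for illustration only.
 *
 *	if (!trace_call_bpf(call, regs))
 *		return;		// 0: event filtered out, don't record
 *	// 1 (or reserved values): store the kprobe event in the ring buffer
 */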

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	__this_cpu_write(bpf_kprobe_override, 1);
	regs_set_return_value(regs, rc);
	arch_ftrace_kprobe_override_function(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
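
/*
 * Illustrative sketch (not part of the original file): typical BPF-side
 * use of this helper from a kprobe program; the traced pointer is an
 * assumption for illustration only.
 *
 *	struct task_struct *task = (void *) PT_REGS_PARM1(ctx);
 *	int pid = 0;
 *
 *	bpf_probe_read(&pid, sizeof(pid), &task->pid);
 */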

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(1 /* Fake ip will not be printed. */,		\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

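	/*
	 * Illustrative note (not from the original source): for a format
	 * string like "%d %ld" on a 64-bit arch, mod[] ends up as
	 * {0, 1, 0}, so __BPF_TP_EMIT() expands (conceptually) to
	 *
	 *	__trace_printk(1, fmt, (u32) arg1, arg2, (u32) arg3);
	 *
	 * i.e. each argument is cast to u32, long or u64 according to the
	 * specifier seen at its position, so the varargs passed to
	 * __trace_printk() match what the format string promises on
	 * either word size.
	 */
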
	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
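
/*
 * Illustrative sketch (not part of the original file): BPF-side use of
 * the helper. The format string must live on the BPF stack, be
 * NUL-terminated within fmt_size, and use only the specifiers listed
 * above; at most three arguments and one %s are accepted.
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 */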

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
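
/*
 * Illustrative note (not from the original source): because the return
 * value multiplexes counter values and negative errnos, a counter that
 * legitimately holds e.g. (u64) -EINVAL is indistinguishable from that
 * error. bpf_perf_event_read_value() below avoids the ambiguity by
 * returning the error code separately from the counter data.
 */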

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
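
/*
 * Illustrative sketch (not part of the original file): BPF-side use of
 * this helper to push a sample through a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * map; the map and struct names are assumptions for illustration only.
 *
 *	struct event { u32 pid; u64 ts; } e = { .pid = pid, .ts = ts };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */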

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
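
/*
 * Illustrative sketch (not part of the original file): the returned
 * length (which includes the trailing NUL) can bound a follow-up output
 * call; the probed pointer and buffer size are assumptions for
 * illustration only.
 *
 *	char name[64];
 *	int len;
 *
 *	len = bpf_probe_read_str(name, sizeof(name), filename_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */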

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}
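
/*
 * Illustrative note (not from the original source): an 8-byte (BPF_DW)
 * read that starts at the last 4-byte member of pt_regs can satisfy the
 * off % size alignment check (whenever that offset happens to be
 * 8-byte aligned) yet would still read past the end of the context;
 * the off + size test above is what rejects it.
 */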

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
	.func		= bpf_perf_prog_read_value_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
					 sample_period);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_sp);
		if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
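
/*
 * Illustrative note (not from the original source): a program's load of
 * ctx->sample_period is thus rewritten into two loads through the
 * kernel-side context, conceptually:
 *
 *	dst = ((struct bpf_perf_event_data_kern *) ctx)->data;
 *	dst = ((struct perf_sample_data *) dst)->period;
 *
 * Any other offset is redirected through the saved pt_regs in the same
 * two-step fashion.
 */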

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works for ftrace based kprobes, and only if they
	 * are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_ftrace(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
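
/*
 * Illustrative note (not from the original source): the attach path
 * never modifies the live array in place. It builds a copy with the new
 * program appended, publishes the copy with rcu_assign_pointer() and
 * only then releases the old array, so concurrent readers running the
 * array under RCU observe either the complete old list or the complete
 * new one.
 */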

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	mutex_lock(&bpf_event_mutex);
	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
				       uquery->ids,
				       query.ids_len,
				       &uquery->prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	return ret;
}