// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we do a bpf_prog_array_valid() check there
	 * to see whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we enter trace_call_bpf() and do the actual proper
	 * rcu_dereference() under the RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * Conversely, if the fetched pointer was NULL, the prog_array
	 * is skipped, at the accepted risk of missing events when it
	 * was updated between that check and the rcu_dereference().
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
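
/*
 * Hedged sketch of the call-site pattern described in the comment above
 * (modeled on kprobe_perf_func() in trace_kprobe.c; the exact shape may
 * differ by kernel version):
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;	 (program returned 0: the event is filtered out)
 */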

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func = bpf_probe_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
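
/*
 * Hedged BPF-program-side sketch of bpf_probe_read() usage (the field
 * read here is illustrative, not from this file): copy a kernel value
 * into program stack memory before using it.
 *
 *	struct task_struct *task = (void *) bpf_get_current_task();
 *	int prio = 0;
 *	bpf_probe_read(&prio, sizeof(prio), &task->prio);
 *
 * On failure the helper zeroes the destination (see above), so 'prio'
 * is never left uninitialized.
 */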

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	/* Horrid workaround for getting va_list handling working with different
	 * argument type combinations generically for 32 and 64 bit archs.
	 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
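
/*
 * Worked example of the mod[]/cast machinery above (a sketch, assuming a
 * 64-bit arch): for fmt = "%d %lu %llx" the scan leaves mod = {0, 1, 2},
 * and __BPF_TP_EMIT() expands to
 *
 *	__trace_printk(0, fmt, (u32) arg1, arg2, arg3);
 *
 * i.e. a plain %d argument is truncated to u32, while %lu and %llx pass
 * the full 64-bit value through.
 */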

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
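
/*
 * Hedged BPF-program-side sketch of bpf_perf_event_output() usage (map
 * and struct names are illustrative; the map must be a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY):
 *
 *	struct event { u32 pid; u64 ts; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts = bpf_ktime_get_ns(),
 *	};
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 *
 * BPF_F_CURRENT_CPU selects the current CPU's slot in the array, which
 * matches the event->oncpu check in __bpf_perf_event_output() above.
 */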

static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway, similar to bpf_probe_read(), and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the
	 * return code altogether don't copy garbage; otherwise the length
	 * of the string is returned, which can be used for
	 * bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func = bpf_probe_read_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
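
/*
 * Hedged BPF-program-side sketch of the pattern the comment above
 * alludes to: use the returned string length to emit only the bytes
 * actually copied (buffer name and source pointer are illustrative,
 * and the verifier additionally needs 'len' to be bounds-checked):
 *
 *	char name[64];
 *	int len = bpf_probe_read_str(name, sizeof(name), filename_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */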

static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 */
static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);

	perf_fetch_caller_regs(regs);
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
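
/*
 * Illustration of the rewrite above (a sketch; register names are
 * arbitrary): a program load such as
 *
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period))
 *
 * becomes two loads that chase the kernel-side indirection:
 *
 *	r0 = *(struct perf_sample_data **)(r1 + offsetof(..._kern, data));
 *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period));
 */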

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if the probe is on the function entry,
	 * and only if the probed function is on the error-injection opt-in
	 * list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = event->tp_event->prog_array;
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when the user only wants to check uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info().
	 */

	mutex_lock(&bpf_event_mutex);
	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
				       ids,
				       ids_len,
				       &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
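
/*
 * Hedged user-space sketch of how perf_event_query_prog_array() is
 * reached (PERF_EVENT_IOC_QUERY_BPF is the uapi ioctl from
 * <uapi/linux/perf_event.h>; error handling elided):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + 64 * sizeof(__u32));
 *	query->ids_len = 64;
 *	if (ioctl(perf_event_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		... (query->prog_cnt and query->ids[] now describe the
 *		     attached programs)
 */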

extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}
	return NULL;
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
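
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */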

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = __bpf_probe_register(btp, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	int err;

	mutex_lock(&bpf_event_mutex);
	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
	mutex_unlock(&bpf_event_mutex);
	return err;
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}