Commit | Line | Data |
---|---|---|
179a0cc4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2541517c | 2 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com |
0515e599 | 3 | * Copyright (c) 2016 Facebook |
2541517c AS | 4 | */ |
5 | #include <linux/kernel.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/bpf.h> | |
0515e599 | 9 | #include <linux/bpf_perf_event.h> |
2541517c AS | 10 | #include <linux/filter.h> |
11 | #include <linux/uaccess.h> | |
9c959c86 | 12 | #include <linux/ctype.h> |
9802d865 | 13 | #include <linux/kprobes.h> |
41bdc4b4 | 14 | #include <linux/syscalls.h> |
540adea3 | 15 | #include <linux/error-injection.h> |
9802d865 | 16 | |
c7b6f29b NA | 17 | #include <asm/tlb.h> |
18 | ||
9802d865 | 19 | #include "trace_probe.h" |
2541517c AS | 20 | #include "trace.h" |
21 | ||
e672db03 SF | 22 | #define bpf_event_rcu_dereference(p) \ |
23 | rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) | |
24 | ||
a38d1107 MM | 25 | #ifdef CONFIG_MODULES |
26 | struct bpf_trace_module { | |
27 | struct module *module; | |
28 | struct list_head list; | |
29 | }; | |
30 | ||
31 | static LIST_HEAD(bpf_trace_modules); | |
32 | static DEFINE_MUTEX(bpf_module_mutex); | |
33 | ||
34 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
35 | { | |
36 | struct bpf_raw_event_map *btp, *ret = NULL; | |
37 | struct bpf_trace_module *btm; | |
38 | unsigned int i; | |
39 | ||
40 | mutex_lock(&bpf_module_mutex); | |
41 | list_for_each_entry(btm, &bpf_trace_modules, list) { | |
42 | for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { | |
43 | btp = &btm->module->bpf_raw_events[i]; | |
44 | if (!strcmp(btp->tp->name, name)) { | |
45 | if (try_module_get(btm->module)) | |
46 | ret = btp; | |
47 | goto out; | |
48 | } | |
49 | } | |
50 | } | |
51 | out: | |
52 | mutex_unlock(&bpf_module_mutex); | |
53 | return ret; | |
54 | } | |
55 | #else | |
56 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
57 | { | |
58 | return NULL; | |
59 | } | |
60 | #endif /* CONFIG_MODULES */ | |
61 | ||
035226b9 | 62 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
c195651e | 63 | u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
035226b9 | 64 | |
2541517c AS | 65 | /** |
66 | * trace_call_bpf - invoke BPF program | |
e87c6bc3 | 67 | * @call: tracepoint event |
2541517c AS | 68 | * @ctx: opaque context pointer |
69 | * | |
70 | * kprobe handlers execute BPF programs via this helper. | |
71 | * Can be used from static tracepoints in the future. | |
72 | * | |
73 | * Return: BPF programs always return an integer which is interpreted by | |
74 | * kprobe handler as: | |
75 | * 0 - return from kprobe (event is filtered out) | |
76 | * 1 - store kprobe event into ring buffer | |
77 | * Other values are reserved and currently alias to 1 | |
78 | */ | |
e87c6bc3 | 79 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
2541517c AS | 80 | { |
81 | unsigned int ret; | |
82 | ||
83 | if (in_nmi()) /* not supported yet */ | |
84 | return 1; | |
85 | ||
86 | preempt_disable(); | |
87 | ||
88 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
89 | /* | |
90 | * since some bpf program is already running on this cpu, | |
91 | * don't call into another bpf program (same or different) | |
92 | * and don't send kprobe event into ring-buffer, | |
93 | * so return zero here | |
94 | */ | |
95 | ret = 0; | |
96 | goto out; | |
97 | } | |
98 | ||
e87c6bc3 YS | 99 | /* |
100 | * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock | |
101 | * to all call sites, we did a bpf_prog_array_valid() there to check | |
102 | * whether call->prog_array is empty or not, which is | |
103 | * a heuristic to speed up execution. |
104 | * | |
105 | * If bpf_prog_array_valid() fetched prog_array was | |
106 | * non-NULL, we go into trace_call_bpf() and do the actual | |
107 | * proper rcu_dereference() under RCU lock. | |
108 | * If it turns out that prog_array is NULL then, we bail out. | |
109 | * Conversely, if the pointer fetched by bpf_prog_array_valid() |
110 | * was NULL, we skip the prog_array, at the accepted risk of |
111 | * missing out on events updated between that check and the |
112 | * rcu_dereference(). |
113 | */ | |
114 | ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); | |
2541517c AS | 115 | |
116 | out: | |
117 | __this_cpu_dec(bpf_prog_active); | |
118 | preempt_enable(); | |
119 | ||
120 | return ret; | |
121 | } | |
122 | EXPORT_SYMBOL_GPL(trace_call_bpf); | |
123 | ||
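For orientation, here is a minimal sketch of how a dispatch site consumes this return value. It is modeled on kprobe_perf_func() in kernel/trace/trace_kprobe.c; the function below is illustrative only, not code from this file:

```c
/* Hypothetical dispatch site, sketched after kprobe_perf_func(). */
static void example_kprobe_dispatch(struct trace_event_call *call,
				    struct pt_regs *regs)
{
	/*
	 * bpf_prog_array_valid() is the cheap empty-check described in
	 * the comment inside trace_call_bpf(); a return value of 0 from
	 * the BPF program filters the event out of the ring buffer.
	 */
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	/* ... otherwise store the kprobe event into the ring buffer ... */
}
```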
9802d865 JB | 124 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
125 | BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) | |
126 | { | |
9802d865 | 127 | regs_set_return_value(regs, rc); |
540adea3 | 128 | override_function_with_return(regs); |
9802d865 JB | 129 | return 0; |
130 | } | |
131 | ||
132 | static const struct bpf_func_proto bpf_override_return_proto = { | |
133 | .func = bpf_override_return, | |
134 | .gpl_only = true, | |
135 | .ret_type = RET_INTEGER, | |
136 | .arg1_type = ARG_PTR_TO_CTX, | |
137 | .arg2_type = ARG_ANYTHING, | |
138 | }; | |
139 | #endif | |
140 | ||
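A hedged example of the program side (not part of this file): a libbpf-style kprobe program that forces an error return from a function on the error-injection opt-in list. SEC() and the helper declaration are assumed to come from bpf_helpers.h; should_failslab() is marked with ALLOW_ERROR_INJECTION() in mm/failslab.c:

```c
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <bpf/bpf_helpers.h>

/* Make the next slab allocation in the traced path fail with -ENOMEM.
 * Requires CONFIG_BPF_KPROBE_OVERRIDE and a kprobe on the function entry.
 */
SEC("kprobe/should_failslab")
int override_example(struct pt_regs *ctx)
{
	bpf_override_return(ctx, -ENOMEM);
	return 0;
}

char _license[] SEC("license") = "GPL";	/* the helper is gpl_only */
```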
f3694e00 | 141 | BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) |
2541517c | 142 | { |
eb33f2cc | 143 | int ret; |
2541517c | 144 | |
074f528e DB | 145 | ret = probe_kernel_read(dst, unsafe_ptr, size); |
146 | if (unlikely(ret < 0)) | |
147 | memset(dst, 0, size); | |
148 | ||
149 | return ret; | |
2541517c AS | 150 | } |
151 | ||
152 | static const struct bpf_func_proto bpf_probe_read_proto = { | |
153 | .func = bpf_probe_read, | |
154 | .gpl_only = true, | |
155 | .ret_type = RET_INTEGER, | |
39f19ebb | 156 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
9c019e2b | 157 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
2541517c AS | 158 | .arg3_type = ARG_ANYTHING, |
159 | }; | |
160 | ||
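A hedged program-side sketch of why arg1 is ARG_PTR_TO_UNINIT_MEM: the helper fully overwrites the destination (zeroing it on failure), so the verifier can accept an uninitialized buffer. The probe point and includes are assumptions of this sketch:

```c
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_open")
int probe_read_example(struct pt_regs *ctx)
{
	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
	char comm[16];	/* deliberately left uninitialized */

	/* On error the helper memsets the buffer to zero, so stale
	 * stack bytes can never leak out through it. */
	bpf_probe_read(&comm, sizeof(comm), &task->comm);
	return 0;
}

char _license[] SEC("license") = "GPL";
```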
f3694e00 DB | 161 | BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, |
162 | u32, size) | |
96ae5227 | 163 | { |
96ae5227 SD | 164 | /* |
165 | * Ensure we're in user context which is safe for the helper to | |
166 | * run. This helper has no business in a kthread. | |
167 | * | |
168 | * access_ok() should prevent writing to non-user memory, but in | |
169 | * some situations (nommu, temporary switch, etc) access_ok() does | |
170 | * not provide enough validation, hence the check on KERNEL_DS. | |
c7b6f29b NA | 171 | * |
172 | * nmi_uaccess_okay() ensures the probe is not run in an interim | |
173 | * state, when the task or mm are switched. This is specifically | |
174 | * required to prevent the use of temporary mm. | |
96ae5227 SD | 175 | */ |
176 | ||
177 | if (unlikely(in_interrupt() || | |
178 | current->flags & (PF_KTHREAD | PF_EXITING))) | |
179 | return -EPERM; | |
db68ce10 | 180 | if (unlikely(uaccess_kernel())) |
96ae5227 | 181 | return -EPERM; |
c7b6f29b NA | 182 | if (unlikely(!nmi_uaccess_okay())) |
183 | return -EPERM; | |
96d4f267 | 184 | if (!access_ok(unsafe_ptr, size)) |
96ae5227 SD | 185 | return -EPERM; |
186 | ||
187 | return probe_kernel_write(unsafe_ptr, src, size); | |
188 | } | |
189 | ||
190 | static const struct bpf_func_proto bpf_probe_write_user_proto = { | |
191 | .func = bpf_probe_write_user, | |
192 | .gpl_only = true, | |
193 | .ret_type = RET_INTEGER, | |
194 | .arg1_type = ARG_ANYTHING, | |
39f19ebb AS | 195 | .arg2_type = ARG_PTR_TO_MEM, |
196 | .arg3_type = ARG_CONST_SIZE, | |
96ae5227 SD | 197 | }; |
198 | ||
199 | static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |
200 | { | |
201 | pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", | |
202 | current->comm, task_pid_nr(current)); | |
203 | ||
204 | return &bpf_probe_write_user_proto; | |
205 | } | |
206 | ||
9c959c86 | 207 | /* |
7bda4b40 JF | 208 | * Only limited trace_printk() conversion specifiers allowed: |
209 | * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s | |
9c959c86 | 210 | */ |
f3694e00 DB | 211 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
212 | u64, arg2, u64, arg3) | |
9c959c86 | 213 | { |
8d3b7dce | 214 | bool str_seen = false; |
9c959c86 AS | 215 | int mod[3] = {}; |
216 | int fmt_cnt = 0; | |
8d3b7dce AS | 217 | u64 unsafe_addr; |
218 | char buf[64]; | |
9c959c86 AS | 219 | int i; |
220 | ||
221 | /* | |
222 | * bpf_check()->check_func_arg()->check_stack_boundary() | |
223 | * guarantees that fmt points to bpf program stack, | |
224 | * fmt_size bytes of it were initialized and fmt_size > 0 | |
225 | */ | |
226 | if (fmt[--fmt_size] != 0) | |
227 | return -EINVAL; | |
228 | ||
229 | /* check format string for allowed specifiers */ | |
230 | for (i = 0; i < fmt_size; i++) { | |
231 | if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) | |
232 | return -EINVAL; | |
233 | ||
234 | if (fmt[i] != '%') | |
235 | continue; | |
236 | ||
237 | if (fmt_cnt >= 3) | |
238 | return -EINVAL; | |
239 | ||
240 | /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ | |
241 | i++; | |
242 | if (fmt[i] == 'l') { | |
243 | mod[fmt_cnt]++; | |
244 | i++; | |
8d3b7dce | 245 | } else if (fmt[i] == 'p' || fmt[i] == 's') { |
9c959c86 | 246 | mod[fmt_cnt]++; |
1efb6ee3 MP | 247 | /* disallow any further format extensions */ |
248 | if (fmt[i + 1] != 0 && | |
249 | !isspace(fmt[i + 1]) && | |
250 | !ispunct(fmt[i + 1])) | |
9c959c86 AS | 251 | return -EINVAL; |
252 | fmt_cnt++; | |
1efb6ee3 | 253 | if (fmt[i] == 's') { |
8d3b7dce AS | 254 | if (str_seen) |
255 | /* allow only one '%s' per fmt string */ | |
256 | return -EINVAL; | |
257 | str_seen = true; | |
258 | ||
259 | switch (fmt_cnt) { | |
260 | case 1: | |
f3694e00 DB | 261 | unsafe_addr = arg1; |
262 | arg1 = (long) buf; | |
8d3b7dce AS | 263 | break; |
264 | case 2: | |
f3694e00 DB | 265 | unsafe_addr = arg2; |
266 | arg2 = (long) buf; | |
8d3b7dce AS | 267 | break; |
268 | case 3: | |
f3694e00 DB | 269 | unsafe_addr = arg3; |
270 | arg3 = (long) buf; | |
8d3b7dce AS | 271 | break; |
272 | } | |
273 | buf[0] = 0; | |
274 | strncpy_from_unsafe(buf, | |
275 | (void *) (long) unsafe_addr, | |
276 | sizeof(buf)); | |
277 | } | |
9c959c86 AS | 278 | continue; |
279 | } | |
280 | ||
281 | if (fmt[i] == 'l') { | |
282 | mod[fmt_cnt]++; | |
283 | i++; | |
284 | } | |
285 | ||
7bda4b40 JF | 286 | if (fmt[i] != 'i' && fmt[i] != 'd' && |
287 | fmt[i] != 'u' && fmt[i] != 'x') | |
9c959c86 AS | 288 | return -EINVAL; |
289 | fmt_cnt++; | |
290 | } | |
291 | ||
88a5c690 DB | 292 | /* Horrid workaround for getting va_list handling working with different |
293 | * argument type combinations generically for 32 and 64 bit archs. | |
294 | */ | |
295 | #define __BPF_TP_EMIT() __BPF_ARG3_TP() | |
296 | #define __BPF_TP(...) \ | |
eefa864a | 297 | __trace_printk(0 /* Fake ip */, \ |
88a5c690 DB | 298 | fmt, ##__VA_ARGS__) |
299 | ||
300 | #define __BPF_ARG1_TP(...) \ | |
301 | ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ | |
302 | ? __BPF_TP(arg1, ##__VA_ARGS__) \ | |
303 | : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ | |
304 | ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ | |
305 | : __BPF_TP((u32)arg1, ##__VA_ARGS__))) | |
306 | ||
307 | #define __BPF_ARG2_TP(...) \ | |
308 | ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ | |
309 | ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ | |
310 | : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ | |
311 | ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ | |
312 | : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) | |
313 | ||
314 | #define __BPF_ARG3_TP(...) \ | |
315 | ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ | |
316 | ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ | |
317 | : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ | |
318 | ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ | |
319 | : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) | |
320 | ||
321 | return __BPF_TP_EMIT(); | |
9c959c86 AS | 322 | } |
323 | ||
324 | static const struct bpf_func_proto bpf_trace_printk_proto = { | |
325 | .func = bpf_trace_printk, | |
326 | .gpl_only = true, | |
327 | .ret_type = RET_INTEGER, | |
39f19ebb AS | 328 | .arg1_type = ARG_PTR_TO_MEM, |
329 | .arg2_type = ARG_CONST_SIZE, | |
9c959c86 AS | 330 | }; |
331 | ||
0756ea3e AS | 332 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void) |
333 | { | |
334 | /* | |
335 | * this program might be calling bpf_trace_printk, | |
336 | * so allocate per-cpu printk buffers | |
337 | */ | |
338 | trace_printk_init_buffers(); | |
339 | ||
340 | return &bpf_trace_printk_proto; | |
341 | } | |
342 | ||
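A hedged program-side sketch of the contract enforced above: at most three conversions, a single %s, and the string argument is copied by strncpy_from_unsafe() into the 64-byte on-stack buf before __trace_printk() runs. Output appears in the tracing trace_pipe. The probe point is an assumption, and PT_REGS_PARM2() is assumed to come from libbpf's tracing headers:

```c
SEC("kprobe/do_sys_open")
int printk_example(struct pt_regs *ctx)
{
	/* fmt must live on the BPF stack and be NUL-terminated;
	 * fmt_size includes the terminator. */
	char fmt[] = "open by pid %d, filename %s\n";
	const char *filename = (const char *)PT_REGS_PARM2(ctx);

	bpf_trace_printk(fmt, sizeof(fmt),
			 bpf_get_current_pid_tgid() >> 32, filename);
	return 0;
}
```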
908432ca YS | 343 | static __always_inline int |
344 | get_map_perf_counter(struct bpf_map *map, u64 flags, | |
345 | u64 *value, u64 *enabled, u64 *running) | |
35578d79 | 346 | { |
35578d79 | 347 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
6816a7ff DB | 348 | unsigned int cpu = smp_processor_id(); |
349 | u64 index = flags & BPF_F_INDEX_MASK; | |
3b1efb19 | 350 | struct bpf_event_entry *ee; |
35578d79 | 351 | |
6816a7ff DB | 352 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
353 | return -EINVAL; | |
354 | if (index == BPF_F_CURRENT_CPU) | |
355 | index = cpu; | |
35578d79 KX | 356 | if (unlikely(index >= array->map.max_entries)) |
357 | return -E2BIG; | |
358 | ||
3b1efb19 | 359 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 360 | if (!ee) |
35578d79 KX | 361 | return -ENOENT; |
362 | ||
908432ca YS | 363 | return perf_event_read_local(ee->event, value, enabled, running); |
364 | } | |
365 | ||
366 | BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |
367 | { | |
368 | u64 value = 0; | |
369 | int err; | |
370 | ||
371 | err = get_map_perf_counter(map, flags, &value, NULL, NULL); | |
35578d79 | 372 | /* |
f91840a3 AS | 373 | * this api is ugly since we miss [-22..-2] range of valid |
374 | * counter values, but that's uapi | |
35578d79 | 375 | */ |
f91840a3 AS | 376 | if (err) |
377 | return err; | |
378 | return value; | |
35578d79 KX | 379 | } |
380 | ||
62544ce8 | 381 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
35578d79 | 382 | .func = bpf_perf_event_read, |
1075ef59 | 383 | .gpl_only = true, |
35578d79 KX | 384 | .ret_type = RET_INTEGER, |
385 | .arg1_type = ARG_CONST_MAP_PTR, | |
386 | .arg2_type = ARG_ANYTHING, | |
387 | }; | |
388 | ||
908432ca YS | 389 | BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, |
390 | struct bpf_perf_event_value *, buf, u32, size) | |
391 | { | |
392 | int err = -EINVAL; | |
393 | ||
394 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
395 | goto clear; | |
396 | err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, | |
397 | &buf->running); | |
398 | if (unlikely(err)) | |
399 | goto clear; | |
400 | return 0; | |
401 | clear: | |
402 | memset(buf, 0, size); | |
403 | return err; | |
404 | } | |
405 | ||
406 | static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |
407 | .func = bpf_perf_event_read_value, | |
408 | .gpl_only = true, | |
409 | .ret_type = RET_INTEGER, | |
410 | .arg1_type = ARG_CONST_MAP_PTR, | |
411 | .arg2_type = ARG_ANYTHING, | |
412 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
413 | .arg4_type = ARG_CONST_SIZE, | |
414 | }; | |
415 | ||
8e7a3920 DB | 416 | static __always_inline u64 |
417 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |
283ca526 | 418 | u64 flags, struct perf_sample_data *sd) |
a43eec30 | 419 | { |
a43eec30 | 420 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
d7931330 | 421 | unsigned int cpu = smp_processor_id(); |
1e33759c | 422 | u64 index = flags & BPF_F_INDEX_MASK; |
3b1efb19 | 423 | struct bpf_event_entry *ee; |
a43eec30 | 424 | struct perf_event *event; |
a43eec30 | 425 | |
1e33759c | 426 | if (index == BPF_F_CURRENT_CPU) |
d7931330 | 427 | index = cpu; |
a43eec30 AS | 428 | if (unlikely(index >= array->map.max_entries)) |
429 | return -E2BIG; | |
430 | ||
3b1efb19 | 431 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 432 | if (!ee) |
a43eec30 AS | 433 | return -ENOENT; |
434 | ||
3b1efb19 | 435 | event = ee->event; |
a43eec30 AS | 436 | if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || |
437 | event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) | |
438 | return -EINVAL; | |
439 | ||
d7931330 | 440 | if (unlikely(event->oncpu != cpu)) |
a43eec30 AS | 441 | return -EOPNOTSUPP; |
442 | ||
56201969 | 443 | return perf_event_output(event, sd, regs); |
a43eec30 AS | 444 | } |
445 | ||
9594dc3c MM | 446 | /* |
447 | * Support executing tracepoints in normal, irq, and nmi context, each |
448 | * of which may call bpf_perf_event_output. |
449 | */ | |
450 | struct bpf_trace_sample_data { | |
451 | struct perf_sample_data sds[3]; | |
452 | }; | |
453 | ||
454 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); | |
455 | static DEFINE_PER_CPU(int, bpf_trace_nest_level); | |
f3694e00 DB | 456 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
457 | u64, flags, void *, data, u64, size) | |
8e7a3920 | 458 | { |
9594dc3c MM | 459 | struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); |
460 | int nest_level = this_cpu_inc_return(bpf_trace_nest_level); | |
8e7a3920 DB | 461 | struct perf_raw_record raw = { |
462 | .frag = { | |
463 | .size = size, | |
464 | .data = data, | |
465 | }, | |
466 | }; | |
9594dc3c MM | 467 | struct perf_sample_data *sd; |
468 | int err; | |
8e7a3920 | 469 | |
9594dc3c MM | 470 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { |
471 | err = -EBUSY; | |
472 | goto out; | |
473 | } | |
474 | ||
475 | sd = &sds->sds[nest_level - 1]; | |
476 | ||
477 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { | |
478 | err = -EINVAL; | |
479 | goto out; | |
480 | } | |
8e7a3920 | 481 | |
283ca526 DB | 482 | perf_sample_data_init(sd, 0, 0); |
483 | sd->raw = &raw; | |
484 | ||
9594dc3c MM | 485 | err = __bpf_perf_event_output(regs, map, flags, sd); |
486 | ||
487 | out: | |
488 | this_cpu_dec(bpf_trace_nest_level); | |
489 | return err; | |
8e7a3920 DB | 490 | } |
491 | ||
a43eec30 AS | 492 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
493 | .func = bpf_perf_event_output, | |
1075ef59 | 494 | .gpl_only = true, |
a43eec30 AS | 495 | .ret_type = RET_INTEGER, |
496 | .arg1_type = ARG_PTR_TO_CTX, | |
497 | .arg2_type = ARG_CONST_MAP_PTR, | |
498 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 499 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 500 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
a43eec30 AS | 501 | }; |
502 | ||
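A hedged program-side sketch (map name, event struct, and probe point are assumptions of this sketch): the helper expects a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose entries are PERF_COUNT_SW_BPF_OUTPUT events, and BPF_F_CURRENT_CPU selects the current CPU's entry per the index handling above:

```c
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	__u64 ts;
};

struct bpf_map_def SEC("maps") events = {
	.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(__u32),
	.max_entries = 0,	/* loaders typically size this to nr_cpus */
};

SEC("kprobe/do_sys_open")
int output_example(struct pt_regs *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.ts  = bpf_ktime_get_ns(),
	};

	return bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				     &e, sizeof(e));
}
```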
bd570ff9 | 503 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); |
283ca526 | 504 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); |
bd570ff9 | 505 | |
555c8a86 DB | 506 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
507 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | |
bd570ff9 | 508 | { |
283ca526 | 509 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); |
bd570ff9 | 510 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); |
555c8a86 DB | 511 | struct perf_raw_frag frag = { |
512 | .copy = ctx_copy, | |
513 | .size = ctx_size, | |
514 | .data = ctx, | |
515 | }; | |
516 | struct perf_raw_record raw = { | |
517 | .frag = { | |
183fc153 AM | 518 | { |
519 | .next = ctx_size ? &frag : NULL, | |
520 | }, | |
555c8a86 DB | 521 | .size = meta_size, |
522 | .data = meta, | |
523 | }, | |
524 | }; | |
bd570ff9 DB | 525 | |
526 | perf_fetch_caller_regs(regs); | |
283ca526 DB | 527 | perf_sample_data_init(sd, 0, 0); |
528 | sd->raw = &raw; | |
bd570ff9 | 529 | |
283ca526 | 530 | return __bpf_perf_event_output(regs, map, flags, sd); |
bd570ff9 DB | 531 | } |
532 | ||
f3694e00 | 533 | BPF_CALL_0(bpf_get_current_task) |
606274c5 AS | 534 | { |
535 | return (long) current; | |
536 | } | |
537 | ||
538 | static const struct bpf_func_proto bpf_get_current_task_proto = { | |
539 | .func = bpf_get_current_task, | |
540 | .gpl_only = true, | |
541 | .ret_type = RET_INTEGER, | |
542 | }; | |
543 | ||
f3694e00 | 544 | BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) |
60d20f91 | 545 | { |
60d20f91 SD | 546 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
547 | struct cgroup *cgrp; | |
60d20f91 | 548 | |
60d20f91 SD | 549 | if (unlikely(idx >= array->map.max_entries)) |
550 | return -E2BIG; | |
551 | ||
552 | cgrp = READ_ONCE(array->ptrs[idx]); | |
553 | if (unlikely(!cgrp)) | |
554 | return -EAGAIN; | |
555 | ||
556 | return task_under_cgroup_hierarchy(current, cgrp); | |
557 | } | |
558 | ||
559 | static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { | |
560 | .func = bpf_current_task_under_cgroup, | |
561 | .gpl_only = false, | |
562 | .ret_type = RET_INTEGER, | |
563 | .arg1_type = ARG_CONST_MAP_PTR, | |
564 | .arg2_type = ARG_ANYTHING, | |
565 | }; | |
566 | ||
a5e8c070 GB | 567 | BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, |
568 | const void *, unsafe_ptr) | |
569 | { | |
570 | int ret; | |
571 | ||
572 | /* | |
573 | * The strncpy_from_unsafe() call will likely not fill the entire | |
574 | * buffer, but that's okay in this circumstance as we're probing | |
575 | * arbitrary memory anyway similar to bpf_probe_read() and might | |
576 | * as well probe the stack. Thus, memory is explicitly cleared | |
577 | * only in error case, so that improper users ignoring return | |
578 | * code altogether don't copy garbage; otherwise length of string | |
579 | * is returned that can be used for bpf_perf_event_output() et al. | |
580 | */ | |
581 | ret = strncpy_from_unsafe(dst, unsafe_ptr, size); | |
582 | if (unlikely(ret < 0)) | |
583 | memset(dst, 0, size); | |
584 | ||
585 | return ret; | |
586 | } | |
587 | ||
588 | static const struct bpf_func_proto bpf_probe_read_str_proto = { | |
589 | .func = bpf_probe_read_str, | |
590 | .gpl_only = true, | |
591 | .ret_type = RET_INTEGER, | |
592 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
5c4e1201 | 593 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
a5e8c070 GB | 594 | .arg3_type = ARG_ANYTHING, |
595 | }; | |
596 | ||
8b401f9e YS | 597 | struct send_signal_irq_work { |
598 | struct irq_work irq_work; | |
599 | struct task_struct *task; | |
600 | u32 sig; | |
601 | }; | |
602 | ||
603 | static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); | |
604 | ||
605 | static void do_bpf_send_signal(struct irq_work *entry) | |
606 | { | |
607 | struct send_signal_irq_work *work; | |
608 | ||
609 | work = container_of(entry, struct send_signal_irq_work, irq_work); | |
610 | group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID); | |
611 | } | |
612 | ||
613 | BPF_CALL_1(bpf_send_signal, u32, sig) | |
614 | { | |
615 | struct send_signal_irq_work *work = NULL; | |
616 | ||
617 | /* Similar to bpf_probe_write_user, the task needs to be |
618 | * in a sound condition and kernel memory access must be |
619 | * permitted in order to send a signal to the current |
620 | * task. |
621 | */ | |
622 | if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) | |
623 | return -EPERM; | |
624 | if (unlikely(uaccess_kernel())) | |
625 | return -EPERM; | |
626 | if (unlikely(!nmi_uaccess_okay())) | |
627 | return -EPERM; | |
628 | ||
629 | if (in_nmi()) { | |
e1afb702 YS | 630 | /* Do an early check on signal validity. Otherwise, |
631 | * the error is lost in deferred irq_work. | |
632 | */ | |
633 | if (unlikely(!valid_signal(sig))) | |
634 | return -EINVAL; | |
635 | ||
8b401f9e YS | 636 | work = this_cpu_ptr(&send_signal_work); |
637 | if (work->irq_work.flags & IRQ_WORK_BUSY) | |
638 | return -EBUSY; | |
639 | ||
640 | /* Add the current task, which is the target of sending signal, | |
641 | * to the irq_work. The current task may change when queued | |
642 | * irq works get executed. | |
643 | */ | |
644 | work->task = current; | |
645 | work->sig = sig; | |
646 | irq_work_queue(&work->irq_work); | |
647 | return 0; | |
648 | } | |
649 | ||
650 | return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID); | |
651 | } | |
652 | ||
653 | static const struct bpf_func_proto bpf_send_signal_proto = { | |
654 | .func = bpf_send_signal, | |
655 | .gpl_only = false, | |
656 | .ret_type = RET_INTEGER, | |
657 | .arg1_type = ARG_ANYTHING, | |
658 | }; | |
659 | ||
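A hedged sketch of the program side; note this helper is not gpl_only. Using the raw signal number is an assumption of this sketch, since pulling signal headers into a BPF object is awkward:

```c
SEC("perf_event")
int send_signal_example(void *ctx)
{
	/* Deliver SIGUSR1 (10 on x86) to the current task group. From
	 * NMI context, delivery is deferred via the irq_work above. */
	bpf_send_signal(10);
	return 0;
}
```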
5e43f899 AI | 660 | static const struct bpf_func_proto * |
661 | tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
2541517c AS | 662 | { |
663 | switch (func_id) { | |
664 | case BPF_FUNC_map_lookup_elem: | |
665 | return &bpf_map_lookup_elem_proto; | |
666 | case BPF_FUNC_map_update_elem: | |
667 | return &bpf_map_update_elem_proto; | |
668 | case BPF_FUNC_map_delete_elem: | |
669 | return &bpf_map_delete_elem_proto; | |
02a8c817 AC | 670 | case BPF_FUNC_map_push_elem: |
671 | return &bpf_map_push_elem_proto; | |
672 | case BPF_FUNC_map_pop_elem: | |
673 | return &bpf_map_pop_elem_proto; | |
674 | case BPF_FUNC_map_peek_elem: | |
675 | return &bpf_map_peek_elem_proto; | |
2541517c AS | 676 | case BPF_FUNC_probe_read: |
677 | return &bpf_probe_read_proto; | |
d9847d31 AS | 678 | case BPF_FUNC_ktime_get_ns: |
679 | return &bpf_ktime_get_ns_proto; | |
04fd61ab AS | 680 | case BPF_FUNC_tail_call: |
681 | return &bpf_tail_call_proto; | |
ffeedafb AS | 682 | case BPF_FUNC_get_current_pid_tgid: |
683 | return &bpf_get_current_pid_tgid_proto; | |
606274c5 AS | 684 | case BPF_FUNC_get_current_task: |
685 | return &bpf_get_current_task_proto; | |
ffeedafb AS | 686 | case BPF_FUNC_get_current_uid_gid: |
687 | return &bpf_get_current_uid_gid_proto; | |
688 | case BPF_FUNC_get_current_comm: | |
689 | return &bpf_get_current_comm_proto; | |
9c959c86 | 690 | case BPF_FUNC_trace_printk: |
0756ea3e | 691 | return bpf_get_trace_printk_proto(); |
ab1973d3 AS | 692 | case BPF_FUNC_get_smp_processor_id: |
693 | return &bpf_get_smp_processor_id_proto; | |
2d0e30c3 DB | 694 | case BPF_FUNC_get_numa_node_id: |
695 | return &bpf_get_numa_node_id_proto; | |
35578d79 KX | 696 | case BPF_FUNC_perf_event_read: |
697 | return &bpf_perf_event_read_proto; | |
96ae5227 SD | 698 | case BPF_FUNC_probe_write_user: |
699 | return bpf_get_probe_write_proto(); | |
60d20f91 SD | 700 | case BPF_FUNC_current_task_under_cgroup: |
701 | return &bpf_current_task_under_cgroup_proto; | |
8937bd80 AS | 702 | case BPF_FUNC_get_prandom_u32: |
703 | return &bpf_get_prandom_u32_proto; | |
a5e8c070 GB | 704 | case BPF_FUNC_probe_read_str: |
705 | return &bpf_probe_read_str_proto; | |
34ea38ca | 706 | #ifdef CONFIG_CGROUPS |
bf6fa2c8 YS | 707 | case BPF_FUNC_get_current_cgroup_id: |
708 | return &bpf_get_current_cgroup_id_proto; | |
34ea38ca | 709 | #endif |
8b401f9e YS | 710 | case BPF_FUNC_send_signal: |
711 | return &bpf_send_signal_proto; | |
9fd82b61 AS | 712 | default: |
713 | return NULL; | |
714 | } | |
715 | } | |
716 | ||
5e43f899 AI | 717 | static const struct bpf_func_proto * |
718 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS | 719 | { |
720 | switch (func_id) { | |
a43eec30 AS | 721 | case BPF_FUNC_perf_event_output: |
722 | return &bpf_perf_event_output_proto; | |
d5a3b1f6 AS | 723 | case BPF_FUNC_get_stackid: |
724 | return &bpf_get_stackid_proto; | |
c195651e YS | 725 | case BPF_FUNC_get_stack: |
726 | return &bpf_get_stack_proto; | |
908432ca YS | 727 | case BPF_FUNC_perf_event_read_value: |
728 | return &bpf_perf_event_read_value_proto; | |
9802d865 JB | 729 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
730 | case BPF_FUNC_override_return: | |
731 | return &bpf_override_return_proto; | |
732 | #endif | |
2541517c | 733 | default: |
5e43f899 | 734 | return tracing_func_proto(func_id, prog); |
2541517c AS | 735 | } |
736 | } | |
737 | ||
738 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | |
19de99f7 | 739 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 740 | const struct bpf_prog *prog, |
23994631 | 741 | struct bpf_insn_access_aux *info) |
2541517c | 742 | { |
2541517c AS | 743 | if (off < 0 || off >= sizeof(struct pt_regs)) |
744 | return false; | |
2541517c AS | 745 | if (type != BPF_READ) |
746 | return false; | |
2541517c AS | 747 | if (off % size != 0) |
748 | return false; | |
2d071c64 DB | 749 | /* |
750 | * Assertion for 32 bit to make sure last 8 byte access | |
751 | * (BPF_DW) to the last 4 byte member is disallowed. | |
752 | */ | |
753 | if (off + size > sizeof(struct pt_regs)) | |
754 | return false; | |
755 | ||
2541517c AS | 756 | return true; |
757 | } | |
758 | ||
7de16e3a | 759 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
2541517c AS | 760 | .get_func_proto = kprobe_prog_func_proto, |
761 | .is_valid_access = kprobe_prog_is_valid_access, | |
762 | }; | |
763 | ||
7de16e3a JK | 764 | const struct bpf_prog_ops kprobe_prog_ops = { |
765 | }; | |
766 | ||
f3694e00 DB | 767 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
768 | u64, flags, void *, data, u64, size) | |
9940d67c | 769 | { |
f3694e00 DB | 770 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
771 | ||
9940d67c AS | 772 | /* |
773 | * r1 points to perf tracepoint buffer where first 8 bytes are hidden | |
774 | * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it | |
f3694e00 | 775 | * from there and call the same bpf_perf_event_output() helper inline. |
9940d67c | 776 | */ |
f3694e00 | 777 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
9940d67c AS | 778 | } |
779 | ||
780 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { | |
781 | .func = bpf_perf_event_output_tp, | |
782 | .gpl_only = true, | |
783 | .ret_type = RET_INTEGER, | |
784 | .arg1_type = ARG_PTR_TO_CTX, | |
785 | .arg2_type = ARG_CONST_MAP_PTR, | |
786 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 787 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 788 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
9940d67c AS | 789 | }; |
790 | ||
f3694e00 DB | 791 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
792 | u64, flags) | |
9940d67c | 793 | { |
f3694e00 | 794 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
9940d67c | 795 | |
f3694e00 DB | 796 | /* |
797 | * Same comment as in bpf_perf_event_output_tp(), only that this time | |
798 | * the other helper's function body cannot be inlined due to being | |
799 | * external, thus we need to call raw helper function. | |
800 | */ | |
801 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
802 | flags, 0, 0); | |
9940d67c AS | 803 | } |
804 | ||
805 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { | |
806 | .func = bpf_get_stackid_tp, | |
807 | .gpl_only = true, | |
808 | .ret_type = RET_INTEGER, | |
809 | .arg1_type = ARG_PTR_TO_CTX, | |
810 | .arg2_type = ARG_CONST_MAP_PTR, | |
811 | .arg3_type = ARG_ANYTHING, | |
812 | }; | |
813 | ||
c195651e YS | 814 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
815 | u64, flags) | |
816 | { | |
817 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; | |
818 | ||
819 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
820 | (unsigned long) size, flags, 0); | |
821 | } | |
822 | ||
823 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { | |
824 | .func = bpf_get_stack_tp, | |
825 | .gpl_only = true, | |
826 | .ret_type = RET_INTEGER, | |
827 | .arg1_type = ARG_PTR_TO_CTX, | |
828 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
829 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
830 | .arg4_type = ARG_ANYTHING, | |
831 | }; | |
832 | ||
5e43f899 AI | 833 | static const struct bpf_func_proto * |
834 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
f005afed YS | 835 | { |
836 | switch (func_id) { | |
837 | case BPF_FUNC_perf_event_output: | |
838 | return &bpf_perf_event_output_proto_tp; | |
839 | case BPF_FUNC_get_stackid: | |
840 | return &bpf_get_stackid_proto_tp; | |
c195651e YS | 841 | case BPF_FUNC_get_stack: |
842 | return &bpf_get_stack_proto_tp; | |
f005afed | 843 | default: |
5e43f899 | 844 | return tracing_func_proto(func_id, prog); |
f005afed YS | 845 | } |
846 | } | |
847 | ||
848 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | |
5e43f899 | 849 | const struct bpf_prog *prog, |
f005afed YS | 850 | struct bpf_insn_access_aux *info) |
851 | { | |
852 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | |
853 | return false; | |
854 | if (type != BPF_READ) | |
855 | return false; | |
856 | if (off % size != 0) | |
857 | return false; | |
858 | ||
859 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); | |
860 | return true; | |
861 | } | |
862 | ||
863 | const struct bpf_verifier_ops tracepoint_verifier_ops = { | |
864 | .get_func_proto = tp_prog_func_proto, | |
865 | .is_valid_access = tp_prog_is_valid_access, | |
866 | }; | |
867 | ||
868 | const struct bpf_prog_ops tracepoint_prog_ops = { | |
869 | }; | |
870 | ||
871 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, | |
4bebdc7a YS | 872 | struct bpf_perf_event_value *, buf, u32, size) |
873 | { | |
874 | int err = -EINVAL; | |
875 | ||
876 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
877 | goto clear; | |
878 | err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, | |
879 | &buf->running); | |
880 | if (unlikely(err)) | |
881 | goto clear; | |
882 | return 0; | |
883 | clear: | |
884 | memset(buf, 0, size); | |
885 | return err; | |
886 | } | |
887 | ||
f005afed YS | 888 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
889 | .func = bpf_perf_prog_read_value, | |
4bebdc7a YS | 890 | .gpl_only = true, |
891 | .ret_type = RET_INTEGER, | |
892 | .arg1_type = ARG_PTR_TO_CTX, | |
893 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
894 | .arg3_type = ARG_CONST_SIZE, | |
895 | }; | |
896 | ||
5e43f899 AI | 897 | static const struct bpf_func_proto * |
898 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS | 899 | { |
900 | switch (func_id) { | |
901 | case BPF_FUNC_perf_event_output: | |
9940d67c | 902 | return &bpf_perf_event_output_proto_tp; |
9fd82b61 | 903 | case BPF_FUNC_get_stackid: |
9940d67c | 904 | return &bpf_get_stackid_proto_tp; |
c195651e YS | 905 | case BPF_FUNC_get_stack: |
906 | return &bpf_get_stack_proto_tp; | |
4bebdc7a | 907 | case BPF_FUNC_perf_prog_read_value: |
f005afed | 908 | return &bpf_perf_prog_read_value_proto; |
9fd82b61 | 909 | default: |
5e43f899 | 910 | return tracing_func_proto(func_id, prog); |
9fd82b61 AS | 911 | } |
912 | } | |
913 | ||
c4f6699d AS | 914 | /* |
915 | * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp | |
916 | * to avoid potential recursive reuse issue when/if tracepoints are added | |
9594dc3c MM | 917 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. |
918 | * | |
919 | * Since raw tracepoints run despite bpf_prog_active, support concurrent usage | |
920 | * in normal, irq, and nmi context. | |
c4f6699d | 921 | */ |
9594dc3c MM | 922 | struct bpf_raw_tp_regs { |
923 | struct pt_regs regs[3]; | |
924 | }; | |
925 | static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); | |
926 | static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); | |
927 | static struct pt_regs *get_bpf_raw_tp_regs(void) | |
928 | { | |
929 | struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
930 | int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); | |
931 | ||
932 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { | |
933 | this_cpu_dec(bpf_raw_tp_nest_level); | |
934 | return ERR_PTR(-EBUSY); | |
935 | } | |
936 | ||
937 | return &tp_regs->regs[nest_level - 1]; | |
938 | } | |
939 | ||
940 | static void put_bpf_raw_tp_regs(void) | |
941 | { | |
942 | this_cpu_dec(bpf_raw_tp_nest_level); | |
943 | } | |
944 | ||
c4f6699d AS | 945 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, |
946 | struct bpf_map *, map, u64, flags, void *, data, u64, size) | |
947 | { | |
9594dc3c MM | 948 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
949 | int ret; | |
950 | ||
951 | if (IS_ERR(regs)) | |
952 | return PTR_ERR(regs); | |
c4f6699d AS | 953 | |
954 | perf_fetch_caller_regs(regs); | |
9594dc3c MM | 955 | ret = ____bpf_perf_event_output(regs, map, flags, data, size); |
956 | ||
957 | put_bpf_raw_tp_regs(); | |
958 | return ret; | |
c4f6699d AS | 959 | } |
960 | ||
961 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { | |
962 | .func = bpf_perf_event_output_raw_tp, | |
963 | .gpl_only = true, | |
964 | .ret_type = RET_INTEGER, | |
965 | .arg1_type = ARG_PTR_TO_CTX, | |
966 | .arg2_type = ARG_CONST_MAP_PTR, | |
967 | .arg3_type = ARG_ANYTHING, | |
968 | .arg4_type = ARG_PTR_TO_MEM, | |
969 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, | |
970 | }; | |
971 | ||
972 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, | |
973 | struct bpf_map *, map, u64, flags) | |
974 | { | |
9594dc3c MM | 975 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
976 | int ret; | |
977 | ||
978 | if (IS_ERR(regs)) | |
979 | return PTR_ERR(regs); | |
c4f6699d AS | 980 | |
981 | perf_fetch_caller_regs(regs); | |
982 | /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ | |
9594dc3c MM | 983 | ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, |
984 | flags, 0, 0); | |
985 | put_bpf_raw_tp_regs(); | |
986 | return ret; | |
c4f6699d AS | 987 | } |
988 | ||
989 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { | |
990 | .func = bpf_get_stackid_raw_tp, | |
991 | .gpl_only = true, | |
992 | .ret_type = RET_INTEGER, | |
993 | .arg1_type = ARG_PTR_TO_CTX, | |
994 | .arg2_type = ARG_CONST_MAP_PTR, | |
995 | .arg3_type = ARG_ANYTHING, | |
996 | }; | |
997 | ||
c195651e YS | 998 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
999 | void *, buf, u32, size, u64, flags) | |
1000 | { | |
9594dc3c MM | 1001 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1002 | int ret; | |
1003 | ||
1004 | if (IS_ERR(regs)) | |
1005 | return PTR_ERR(regs); | |
c195651e YS | 1006 | |
1007 | perf_fetch_caller_regs(regs); | |
9594dc3c MM | 1008 | ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, |
1009 | (unsigned long) size, flags, 0); | |
1010 | put_bpf_raw_tp_regs(); | |
1011 | return ret; | |
c195651e YS | 1012 | } |
1013 | ||
1014 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { | |
1015 | .func = bpf_get_stack_raw_tp, | |
1016 | .gpl_only = true, | |
1017 | .ret_type = RET_INTEGER, | |
1018 | .arg1_type = ARG_PTR_TO_CTX, | |
1019 | .arg2_type = ARG_PTR_TO_MEM, | |
1020 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1021 | .arg4_type = ARG_ANYTHING, | |
1022 | }; | |
1023 | ||
5e43f899 AI | 1024 | static const struct bpf_func_proto * |
1025 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
c4f6699d AS | 1026 | { |
1027 | switch (func_id) { | |
1028 | case BPF_FUNC_perf_event_output: | |
1029 | return &bpf_perf_event_output_proto_raw_tp; | |
1030 | case BPF_FUNC_get_stackid: | |
1031 | return &bpf_get_stackid_proto_raw_tp; | |
c195651e YS | 1032 | case BPF_FUNC_get_stack: |
1033 | return &bpf_get_stack_proto_raw_tp; | |
c4f6699d | 1034 | default: |
5e43f899 | 1035 | return tracing_func_proto(func_id, prog); |
c4f6699d AS | 1036 | } |
1037 | } | |
1038 | ||
1039 | static bool raw_tp_prog_is_valid_access(int off, int size, | |
1040 | enum bpf_access_type type, | |
5e43f899 | 1041 | const struct bpf_prog *prog, |
c4f6699d AS | 1042 | struct bpf_insn_access_aux *info) |
1043 | { | |
1044 | /* largest tracepoint in the kernel has 12 args */ | |
1045 | if (off < 0 || off >= sizeof(__u64) * 12) | |
1046 | return false; | |
1047 | if (type != BPF_READ) | |
1048 | return false; | |
1049 | if (off % size != 0) | |
1050 | return false; | |
1051 | return true; | |
1052 | } | |
1053 | ||
1054 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { | |
1055 | .get_func_proto = raw_tp_prog_func_proto, | |
1056 | .is_valid_access = raw_tp_prog_is_valid_access, | |
1057 | }; | |
1058 | ||
1059 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { | |
1060 | }; | |
1061 | ||
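A hedged sketch of a raw tracepoint program: ctx->args[] carries the raw u64 tracepoint arguments validated above. The sched_switch argument layout (preempt, prev, next) is an assumption tied to this kernel era's tracepoint definition:

```c
SEC("raw_tracepoint/sched_switch")
int raw_tp_example(struct bpf_raw_tracepoint_args *ctx)
{
	/* args[1] = prev, args[2] = next for
	 * trace_sched_switch(preempt, prev, next). */
	struct task_struct *next = (struct task_struct *)ctx->args[2];
	long state = 0;

	/* Raw tracepoints expose no stable ctx layout beyond args[],
	 * so struct fields must be read with bpf_probe_read(). */
	bpf_probe_read(&state, sizeof(state), &next->state);
	return 0;
}
```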
9df1c28b MM | 1062 | static bool raw_tp_writable_prog_is_valid_access(int off, int size, |
1063 | enum bpf_access_type type, | |
1064 | const struct bpf_prog *prog, | |
1065 | struct bpf_insn_access_aux *info) | |
1066 | { | |
1067 | if (off == 0) { | |
1068 | if (size != sizeof(u64) || type != BPF_READ) | |
1069 | return false; | |
1070 | info->reg_type = PTR_TO_TP_BUFFER; | |
1071 | } | |
1072 | return raw_tp_prog_is_valid_access(off, size, type, prog, info); | |
1073 | } | |
1074 | ||
1075 | const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { | |
1076 | .get_func_proto = raw_tp_prog_func_proto, | |
1077 | .is_valid_access = raw_tp_writable_prog_is_valid_access, | |
1078 | }; | |
1079 | ||
1080 | const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { | |
1081 | }; | |
1082 | ||
0515e599 | 1083 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 1084 | const struct bpf_prog *prog, |
23994631 | 1085 | struct bpf_insn_access_aux *info) |
0515e599 | 1086 | { |
95da0cdb | 1087 | const int size_u64 = sizeof(u64); |
31fd8581 | 1088 | |
0515e599 AS | 1089 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
1090 | return false; | |
1091 | if (type != BPF_READ) | |
1092 | return false; | |
bc23105c DB | 1093 | if (off % size != 0) { |
1094 | if (sizeof(unsigned long) != 4) | |
1095 | return false; | |
1096 | if (size != 8) | |
1097 | return false; | |
1098 | if (off % size != 4) | |
1099 | return false; | |
1100 | } | |
31fd8581 | 1101 | |
f96da094 DB | 1102 | switch (off) { |
1103 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | |
95da0cdb TQ | 1104 | bpf_ctx_record_field_size(info, size_u64); |
1105 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
1106 | return false; | |
1107 | break; | |
1108 | case bpf_ctx_range(struct bpf_perf_event_data, addr): | |
1109 | bpf_ctx_record_field_size(info, size_u64); | |
1110 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
23994631 | 1111 | return false; |
f96da094 DB | 1112 | break; |
1113 | default: | |
0515e599 AS | 1114 | if (size != sizeof(long)) |
1115 | return false; | |
1116 | } | |
f96da094 | 1117 | |
0515e599 AS | 1118 | return true; |
1119 | } | |
1120 | ||
6b8cc1d1 DB | 1121 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
1122 | const struct bpf_insn *si, | |
0515e599 | 1123 | struct bpf_insn *insn_buf, |
f96da094 | 1124 | struct bpf_prog *prog, u32 *target_size) |
0515e599 AS | 1125 | { |
1126 | struct bpf_insn *insn = insn_buf; | |
1127 | ||
6b8cc1d1 | 1128 | switch (si->off) { |
0515e599 | 1129 | case offsetof(struct bpf_perf_event_data, sample_period): |
f035a515 | 1130 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 1131 | data), si->dst_reg, si->src_reg, |
0515e599 | 1132 | offsetof(struct bpf_perf_event_data_kern, data)); |
6b8cc1d1 | 1133 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
f96da094 DB | 1134 | bpf_target_off(struct perf_sample_data, period, 8, |
1135 | target_size)); | |
0515e599 | 1136 | break; |
95da0cdb TQ | 1137 | case offsetof(struct bpf_perf_event_data, addr): |
1138 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | |
1139 | data), si->dst_reg, si->src_reg, | |
1140 | offsetof(struct bpf_perf_event_data_kern, data)); | |
1141 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | |
1142 | bpf_target_off(struct perf_sample_data, addr, 8, | |
1143 | target_size)); | |
1144 | break; | |
0515e599 | 1145 | default: |
f035a515 | 1146 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 1147 | regs), si->dst_reg, si->src_reg, |
0515e599 | 1148 | offsetof(struct bpf_perf_event_data_kern, regs)); |
6b8cc1d1 DB | 1149 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
1150 | si->off); | |
0515e599 AS | 1151 | break; |
1152 | } | |
1153 | ||
1154 | return insn - insn_buf; | |
1155 | } | |
1156 | ||
7de16e3a | 1157 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
f005afed | 1158 | .get_func_proto = pe_prog_func_proto, |
0515e599 AS | 1159 | .is_valid_access = pe_prog_is_valid_access, |
1160 | .convert_ctx_access = pe_prog_convert_ctx_access, | |
1161 | }; | |
7de16e3a JK | 1162 | |
1163 | const struct bpf_prog_ops perf_event_prog_ops = { | |
1164 | }; | |
e87c6bc3 YS | 1165 | |
1166 | static DEFINE_MUTEX(bpf_event_mutex); | |
1167 | ||
c8c088ba YS | 1168 | #define BPF_TRACE_MAX_PROGS 64 |
1169 | ||
e87c6bc3 YS | 1170 | int perf_event_attach_bpf_prog(struct perf_event *event, |
1171 | struct bpf_prog *prog) | |
1172 | { | |
e672db03 | 1173 | struct bpf_prog_array *old_array; |
e87c6bc3 YS | 1174 | struct bpf_prog_array *new_array; |
1175 | int ret = -EEXIST; | |
1176 | ||
9802d865 | 1177 | /* |
b4da3340 MH | 1178 | * Kprobe override only works if the kprobe is on the function entry, |
1179 | * and only if the function is on the error-injection opt-in list. |
9802d865 JB | 1180 | */ |
1181 | if (prog->kprobe_override && | |
b4da3340 | 1182 | (!trace_kprobe_on_func_entry(event->tp_event) || |
9802d865 JB | 1183 | !trace_kprobe_error_injectable(event->tp_event))) |
1184 | return -EINVAL; | |
1185 | ||
e87c6bc3 YS | 1186 | mutex_lock(&bpf_event_mutex); |
1187 | ||
1188 | if (event->prog) | |
07c41a29 | 1189 | goto unlock; |
e87c6bc3 | 1190 | |
e672db03 | 1191 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
c8c088ba YS | 1192 | if (old_array && |
1193 | bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { | |
1194 | ret = -E2BIG; | |
1195 | goto unlock; | |
1196 | } | |
1197 | ||
e87c6bc3 YS | 1198 | ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); |
1199 | if (ret < 0) | |
07c41a29 | 1200 | goto unlock; |
e87c6bc3 YS | 1201 | |
1202 | /* set the new array to event->tp_event and set event->prog */ | |
1203 | event->prog = prog; | |
1204 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
1205 | bpf_prog_array_free(old_array); | |
1206 | ||
07c41a29 | 1207 | unlock: |
e87c6bc3 YS | 1208 | mutex_unlock(&bpf_event_mutex); |
1209 | return ret; | |
1210 | } | |
1211 | ||
1212 | void perf_event_detach_bpf_prog(struct perf_event *event) | |
1213 | { | |
e672db03 | 1214 | struct bpf_prog_array *old_array; |
e87c6bc3 YS | 1215 | struct bpf_prog_array *new_array; |
1216 | int ret; | |
1217 | ||
1218 | mutex_lock(&bpf_event_mutex); | |
1219 | ||
1220 | if (!event->prog) | |
07c41a29 | 1221 | goto unlock; |
e87c6bc3 | 1222 | |
e672db03 | 1223 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
e87c6bc3 | 1224 | ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); |
170a7e3e SY | 1225 | if (ret == -ENOENT) |
1226 | goto unlock; | |
e87c6bc3 YS | 1227 | if (ret < 0) { |
1228 | bpf_prog_array_delete_safe(old_array, event->prog); | |
1229 | } else { | |
1230 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
1231 | bpf_prog_array_free(old_array); | |
1232 | } | |
1233 | ||
1234 | bpf_prog_put(event->prog); | |
1235 | event->prog = NULL; | |
1236 | ||
07c41a29 | 1237 | unlock: |
e87c6bc3 YS | 1238 | mutex_unlock(&bpf_event_mutex); |
1239 | } | |
f371b304 | 1240 | |
f4e2298e | 1241 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
f371b304 YS | 1242 | { |
1243 | struct perf_event_query_bpf __user *uquery = info; | |
1244 | struct perf_event_query_bpf query = {}; | |
e672db03 | 1245 | struct bpf_prog_array *progs; |
3a38bb98 | 1246 | u32 *ids, prog_cnt, ids_len; |
f371b304 YS | 1247 | int ret; |
1248 | ||
1249 | if (!capable(CAP_SYS_ADMIN)) | |
1250 | return -EPERM; | |
1251 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | |
1252 | return -EINVAL; | |
1253 | if (copy_from_user(&query, uquery, sizeof(query))) | |
1254 | return -EFAULT; | |
3a38bb98 YS | 1255 | |
1256 | ids_len = query.ids_len; | |
1257 | if (ids_len > BPF_TRACE_MAX_PROGS) | |
9c481b90 | 1258 | return -E2BIG; |
3a38bb98 YS | 1259 | ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); |
1260 | if (!ids) | |
1261 | return -ENOMEM; | |
1262 | /* | |
1263 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which | |
1264 | * is required when user only wants to check for uquery->prog_cnt. | |
1265 | * There is no need to check for it since the case is handled | |
1266 | * gracefully in bpf_prog_array_copy_info. | |
1267 | */ | |
f371b304 YS | 1268 | |
1269 | mutex_lock(&bpf_event_mutex); | |
e672db03 SF | 1270 | progs = bpf_event_rcu_dereference(event->tp_event->prog_array); |
1271 | ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); | |
f371b304 YS | 1272 | mutex_unlock(&bpf_event_mutex); |
1273 | ||
3a38bb98 YS | 1274 | if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || |
1275 | copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) | |
1276 | ret = -EFAULT; | |
1277 | ||
1278 | kfree(ids); | |
f371b304 YS | 1279 | return ret; |
1280 | } | |
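A hedged user-space sketch of the query side; perf_fd is assumed to be an open PERF_TYPE_TRACEPOINT perf event fd with programs attached, and the caller needs CAP_SYS_ADMIN:

```c
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

#define MAX_PROGS 64	/* mirrors BPF_TRACE_MAX_PROGS above */

int query_attached_progs(int perf_fd)
{
	struct {
		struct perf_event_query_bpf hdr;
		__u32 ids[MAX_PROGS];
	} query = { .hdr = { .ids_len = MAX_PROGS } };

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &query))
		return -1;

	printf("%u program(s) attached\n", query.hdr.prog_cnt);
	return 0;
}
```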
c4f6699d AS | 1281 | |
1282 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; | |
1283 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; | |
1284 | ||
a38d1107 | 1285 | struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) |
c4f6699d AS |
1286 | { |
1287 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; | |
1288 | ||
1289 | for (; btp < __stop__bpf_raw_tp; btp++) { | |
1290 | if (!strcmp(btp->tp->name, name)) | |
1291 | return btp; | |
1292 | } | |
a38d1107 MM | 1293 | |
1294 | return bpf_get_raw_tracepoint_module(name); | |
1295 | } | |
1296 | ||
1297 | void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) | |
1298 | { | |
1299 | struct module *mod = __module_address((unsigned long)btp); | |
1300 | ||
1301 | if (mod) | |
1302 | module_put(mod); | |
c4f6699d AS | 1303 | } |
1304 | ||
1305 | static __always_inline | |
1306 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) | |
1307 | { | |
1308 | rcu_read_lock(); | |
1309 | preempt_disable(); | |
1310 | (void) BPF_PROG_RUN(prog, args); | |
1311 | preempt_enable(); | |
1312 | rcu_read_unlock(); | |
1313 | } | |
1314 | ||
1315 | #define UNPACK(...) __VA_ARGS__ | |
1316 | #define REPEAT_1(FN, DL, X, ...) FN(X) | |
1317 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) | |
1318 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) | |
1319 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) | |
1320 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) | |
1321 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) | |
1322 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) | |
1323 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) | |
1324 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) | |
1325 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) | |
1326 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) | |
1327 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) | |
1328 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) | |
1329 | ||
1330 | #define SARG(X) u64 arg##X | |
1331 | #define COPY(X) args[X] = arg##X | |
1332 | ||
1333 | #define __DL_COM (,) | |
1334 | #define __DL_SEM (;) | |
1335 | ||
1336 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 | |
1337 | ||
1338 | #define BPF_TRACE_DEFN_x(x) \ | |
1339 | void bpf_trace_run##x(struct bpf_prog *prog, \ | |
1340 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ | |
1341 | { \ | |
1342 | u64 args[x]; \ | |
1343 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ | |
1344 | __bpf_trace_run(prog, args); \ | |
1345 | } \ | |
1346 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) | |
1347 | BPF_TRACE_DEFN_x(1); | |
1348 | BPF_TRACE_DEFN_x(2); | |
1349 | BPF_TRACE_DEFN_x(3); | |
1350 | BPF_TRACE_DEFN_x(4); | |
1351 | BPF_TRACE_DEFN_x(5); | |
1352 | BPF_TRACE_DEFN_x(6); | |
1353 | BPF_TRACE_DEFN_x(7); | |
1354 | BPF_TRACE_DEFN_x(8); | |
1355 | BPF_TRACE_DEFN_x(9); | |
1356 | BPF_TRACE_DEFN_x(10); | |
1357 | BPF_TRACE_DEFN_x(11); | |
1358 | BPF_TRACE_DEFN_x(12); | |
1359 | ||
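For readability, this is approximately what BPF_TRACE_DEFN_x(2) expands to; the REPEAT()/SARG()/COPY() machinery above just stamps out twelve such fixed-arity entry points that pack their arguments into a u64 array for the BPF program:

```c
void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
{
	u64 args[2];

	args[0] = arg0;
	args[1] = arg1;
	__bpf_trace_run(prog, args);
}
EXPORT_SYMBOL_GPL(bpf_trace_run2);
```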
1360 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1361 | { | |
1362 | struct tracepoint *tp = btp->tp; | |
1363 | ||
1364 | /* | |
1365 | * check that program doesn't access arguments beyond what's | |
1366 | * available in this tracepoint | |
1367 | */ | |
1368 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) | |
1369 | return -EINVAL; | |
1370 | ||
9df1c28b MM | 1371 | if (prog->aux->max_tp_access > btp->writable_size) |
1372 | return -EINVAL; | |
1373 | ||
c4f6699d AS | 1374 | return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); |
1375 | } | |
1376 | ||
1377 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1378 | { | |
e16ec340 | 1379 | return __bpf_probe_register(btp, prog); |
c4f6699d AS | 1380 | } |
1381 | ||
1382 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1383 | { | |
e16ec340 | 1384 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
c4f6699d | 1385 | } |
41bdc4b4 YS | 1386 | |
1387 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | |
1388 | u32 *fd_type, const char **buf, | |
1389 | u64 *probe_offset, u64 *probe_addr) | |
1390 | { | |
1391 | bool is_tracepoint, is_syscall_tp; | |
1392 | struct bpf_prog *prog; | |
1393 | int flags, err = 0; | |
1394 | ||
1395 | prog = event->prog; | |
1396 | if (!prog) | |
1397 | return -ENOENT; | |
1398 | ||
1399 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ | |
1400 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) | |
1401 | return -EOPNOTSUPP; | |
1402 | ||
1403 | *prog_id = prog->aux->id; | |
1404 | flags = event->tp_event->flags; | |
1405 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; | |
1406 | is_syscall_tp = is_syscall_trace_event(event->tp_event); | |
1407 | ||
1408 | if (is_tracepoint || is_syscall_tp) { | |
1409 | *buf = is_tracepoint ? event->tp_event->tp->name | |
1410 | : event->tp_event->name; | |
1411 | *fd_type = BPF_FD_TYPE_TRACEPOINT; | |
1412 | *probe_offset = 0x0; | |
1413 | *probe_addr = 0x0; | |
1414 | } else { | |
1415 | /* kprobe/uprobe */ | |
1416 | err = -EOPNOTSUPP; | |
1417 | #ifdef CONFIG_KPROBE_EVENTS | |
1418 | if (flags & TRACE_EVENT_FL_KPROBE) | |
1419 | err = bpf_get_kprobe_info(event, fd_type, buf, | |
1420 | probe_offset, probe_addr, | |
1421 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1422 | #endif | |
1423 | #ifdef CONFIG_UPROBE_EVENTS | |
1424 | if (flags & TRACE_EVENT_FL_UPROBE) | |
1425 | err = bpf_get_uprobe_info(event, fd_type, buf, | |
1426 | probe_offset, | |
1427 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1428 | #endif | |
1429 | } | |
1430 | ||
1431 | return err; | |
1432 | } | |
a38d1107 | 1433 | |
9db1ff0a YS | 1434 | static int __init send_signal_irq_work_init(void) |
1435 | { | |
1436 | int cpu; | |
1437 | struct send_signal_irq_work *work; | |
1438 | ||
1439 | for_each_possible_cpu(cpu) { | |
1440 | work = per_cpu_ptr(&send_signal_work, cpu); | |
1441 | init_irq_work(&work->irq_work, do_bpf_send_signal); | |
1442 | } | |
1443 | return 0; | |
1444 | } | |
1445 | ||
1446 | subsys_initcall(send_signal_irq_work_init); | |
1447 | ||
a38d1107 | 1448 | #ifdef CONFIG_MODULES |
390e99cf SF | 1449 | static int bpf_event_notify(struct notifier_block *nb, unsigned long op, |
1450 | void *module) | |
a38d1107 MM | 1451 | { |
1452 | struct bpf_trace_module *btm, *tmp; | |
1453 | struct module *mod = module; | |
1454 | ||
1455 | if (mod->num_bpf_raw_events == 0 || | |
1456 | (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) | |
1457 | return 0; | |
1458 | ||
1459 | mutex_lock(&bpf_module_mutex); | |
1460 | ||
1461 | switch (op) { | |
1462 | case MODULE_STATE_COMING: | |
1463 | btm = kzalloc(sizeof(*btm), GFP_KERNEL); | |
1464 | if (btm) { | |
1465 | btm->module = module; | |
1466 | list_add(&btm->list, &bpf_trace_modules); | |
1467 | } | |
1468 | break; | |
1469 | case MODULE_STATE_GOING: | |
1470 | list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { | |
1471 | if (btm->module == module) { | |
1472 | list_del(&btm->list); | |
1473 | kfree(btm); | |
1474 | break; | |
1475 | } | |
1476 | } | |
1477 | break; | |
1478 | } | |
1479 | ||
1480 | mutex_unlock(&bpf_module_mutex); | |
1481 | ||
1482 | return 0; | |
1483 | } | |
1484 | ||
1485 | static struct notifier_block bpf_module_nb = { | |
1486 | .notifier_call = bpf_event_notify, | |
1487 | }; | |
1488 | ||
390e99cf | 1489 | static int __init bpf_event_init(void) |
a38d1107 MM | 1490 | { |
1491 | register_module_notifier(&bpf_module_nb); | |
1492 | return 0; | |
1493 | } | |
1494 | ||
1495 | fs_initcall(bpf_event_init); | |
1496 | #endif /* CONFIG_MODULES */ |