Commit | Line | Data |
---|---|---|
179a0cc4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2541517c | 2 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com |
0515e599 | 3 | * Copyright (c) 2016 Facebook |
2541517c AS |
4 | */ |
5 | #include <linux/kernel.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/bpf.h> | |
0515e599 | 9 | #include <linux/bpf_perf_event.h> |
c4d0bfb4 | 10 | #include <linux/btf.h> |
2541517c AS |
11 | #include <linux/filter.h> |
12 | #include <linux/uaccess.h> | |
9c959c86 | 13 | #include <linux/ctype.h> |
9802d865 | 14 | #include <linux/kprobes.h> |
ac5a72ea | 15 | #include <linux/spinlock.h> |
41bdc4b4 | 16 | #include <linux/syscalls.h> |
540adea3 | 17 | #include <linux/error-injection.h> |
c9a0f3b8 | 18 | #include <linux/btf_ids.h> |
6f100640 KS |
19 | #include <linux/bpf_lsm.h> |
20 | ||
8e4597c6 | 21 | #include <net/bpf_sk_storage.h> |
9802d865 | 22 | |
c4d0bfb4 AM |
23 | #include <uapi/linux/bpf.h> |
24 | #include <uapi/linux/btf.h> | |
25 | ||
c7b6f29b NA |
26 | #include <asm/tlb.h> |
27 | ||
9802d865 | 28 | #include "trace_probe.h" |
2541517c AS |
29 | #include "trace.h" |
30 | ||
ac5a72ea AM |
31 | #define CREATE_TRACE_POINTS |
32 | #include "bpf_trace.h" | |
33 | ||
e672db03 SF |
34 | #define bpf_event_rcu_dereference(p) \ |
35 | rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) | |
36 | ||
a38d1107 MM |
37 | #ifdef CONFIG_MODULES |
38 | struct bpf_trace_module { | |
39 | struct module *module; | |
40 | struct list_head list; | |
41 | }; | |
42 | ||
43 | static LIST_HEAD(bpf_trace_modules); | |
44 | static DEFINE_MUTEX(bpf_module_mutex); | |
45 | ||
46 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
47 | { | |
48 | struct bpf_raw_event_map *btp, *ret = NULL; | |
49 | struct bpf_trace_module *btm; | |
50 | unsigned int i; | |
51 | ||
52 | mutex_lock(&bpf_module_mutex); | |
53 | list_for_each_entry(btm, &bpf_trace_modules, list) { | |
54 | for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { | |
55 | btp = &btm->module->bpf_raw_events[i]; | |
56 | if (!strcmp(btp->tp->name, name)) { | |
57 | if (try_module_get(btm->module)) | |
58 | ret = btp; | |
59 | goto out; | |
60 | } | |
61 | } | |
62 | } | |
63 | out: | |
64 | mutex_unlock(&bpf_module_mutex); | |
65 | return ret; | |
66 | } | |
67 | #else | |
68 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
69 | { | |
70 | return NULL; | |
71 | } | |
72 | #endif /* CONFIG_MODULES */ | |
73 | ||
035226b9 | 74 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
c195651e | 75 | u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
035226b9 | 76 | |
eb411377 AM |
77 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, |
78 | u64 flags, const struct btf **btf, | |
79 | s32 *btf_id); | |
80 | ||
2541517c AS |
81 | /** |
82 | * trace_call_bpf - invoke BPF program | |
e87c6bc3 | 83 | * @call: tracepoint event |
2541517c AS |
84 | * @ctx: opaque context pointer |
85 | * | |
86 | * kprobe handlers execute BPF programs via this helper. | |
87 | * It can be used from static tracepoints in the future. |
88 | * | |
89 | * Return: BPF programs always return an integer, which the |
90 | * kprobe handler interprets as: |
91 | * 0 - return from kprobe (event is filtered out) | |
92 | * 1 - store kprobe event into ring buffer | |
93 | * Other values are reserved and currently alias to 1 | |
94 | */ | |
e87c6bc3 | 95 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
2541517c AS |
96 | { |
97 | unsigned int ret; | |
98 | ||
b0a81b94 | 99 | cant_sleep(); |
2541517c AS |
100 | |
101 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
102 | /* | |
103 | * Since some bpf program is already running on this cpu, don't |
104 | * call into another bpf program (same or different) and don't |
105 | * send a kprobe event into the ring buffer; |
106 | * just return zero here. |
107 | */ | |
108 | ret = 0; | |
109 | goto out; | |
110 | } | |
111 | ||
e87c6bc3 YS |
112 | /* |
113 | * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock | |
114 | * to all call sites, we did a bpf_prog_array_valid() there to check | |
115 | * whether call->prog_array is empty or not, which is | |
2b5894cc | 116 | * a heuristic to speed up execution. |
e87c6bc3 YS |
117 | * |
118 | * If the prog_array fetched by bpf_prog_array_valid() was |
119 | * non-NULL, we go into trace_call_bpf() and do the actual, |
120 | * proper rcu_dereference() under the RCU lock. |
121 | * If it turns out that prog_array is NULL, we bail out. |
122 | * Conversely, if the bpf_prog_array_valid() fetched pointer |
123 | * was NULL, we skip the prog_array at the risk of missing |
124 | * events when it was updated between that check and the |
125 | * rcu_dereference(); this is an accepted risk. |
126 | */ | |
127 | ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN); | |
2541517c AS |
128 | |
129 | out: | |
130 | __this_cpu_dec(bpf_prog_active); | |
2541517c AS |
131 | |
132 | return ret; | |
133 | } | |
2541517c | 134 | |
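To make the 0/1 return contract described above concrete, here is a minimal sketch of a kprobe program as it would be written against libbpf. The probed function and the pid filter are illustrative assumptions, not taken from this file.

```c
/* Hypothetical kprobe program: returning 0 filters the event out,
 * returning 1 stores the kprobe event, exactly as trace_call_bpf()'s
 * caller interprets it. */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

const volatile __u32 target_pid; /* assumed to be filled in by the loader */

SEC("kprobe/do_nanosleep")
int filter_event(struct pt_regs *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	if (pid != target_pid)
		return 0;	/* filtered out: no event reaches the ring buffer */
	return 1;		/* store the kprobe event */
}

char LICENSE[] SEC("license") = "GPL";
```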
9802d865 JB |
135 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
136 | BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) | |
137 | { | |
9802d865 | 138 | regs_set_return_value(regs, rc); |
540adea3 | 139 | override_function_with_return(regs); |
9802d865 JB |
140 | return 0; |
141 | } | |
142 | ||
143 | static const struct bpf_func_proto bpf_override_return_proto = { | |
144 | .func = bpf_override_return, | |
145 | .gpl_only = true, | |
146 | .ret_type = RET_INTEGER, | |
147 | .arg1_type = ARG_PTR_TO_CTX, | |
148 | .arg2_type = ARG_ANYTHING, | |
149 | }; | |
150 | #endif | |
151 | ||
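A hedged sketch of how a program would use this helper, assuming CONFIG_BPF_KPROBE_OVERRIDE is set and the probed function is on the error-injection allowlist (should_failslab is one such function; the choice here is illustrative):

```c
/* Error injection: force the probed function to return -ENOMEM (-12)
 * to its caller via bpf_override_return(). Only functions marked with
 * ALLOW_ERROR_INJECTION() accept this. */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/should_failslab")
int fail_slab_alloc(struct pt_regs *ctx)
{
	bpf_override_return(ctx, -12 /* -ENOMEM */);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```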
8d92db5c CH |
152 | static __always_inline int |
153 | bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) | |
2541517c | 154 | { |
8d92db5c | 155 | int ret; |
2541517c | 156 | |
c0ee37e8 | 157 | ret = copy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
158 | if (unlikely(ret < 0)) |
159 | memset(dst, 0, size); | |
6ae08ae3 DB |
160 | return ret; |
161 | } | |
162 | ||
8d92db5c CH |
163 | BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, |
164 | const void __user *, unsafe_ptr) | |
165 | { | |
166 | return bpf_probe_read_user_common(dst, size, unsafe_ptr); | |
167 | } | |
168 | ||
f470378c | 169 | const struct bpf_func_proto bpf_probe_read_user_proto = { |
6ae08ae3 DB |
170 | .func = bpf_probe_read_user, |
171 | .gpl_only = true, | |
172 | .ret_type = RET_INTEGER, | |
173 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
174 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
175 | .arg3_type = ARG_ANYTHING, | |
176 | }; | |
177 | ||
8d92db5c CH |
178 | static __always_inline int |
179 | bpf_probe_read_user_str_common(void *dst, u32 size, | |
180 | const void __user *unsafe_ptr) | |
6ae08ae3 | 181 | { |
8d92db5c | 182 | int ret; |
6ae08ae3 | 183 | |
6fa6d280 DX |
184 | /* |
185 | * NB: We rely on strncpy_from_user() not copying junk past the NUL | |
186 | * terminator into `dst`. | |
187 | * | |
188 | * strncpy_from_user() does long-sized strides in the fast path. If the | |
189 | * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, | |
190 | * then there could be junk after the NUL in `dst`. If the user then |
191 | * keys a hash map with `dst`, semantically identical strings can |
192 | * occupy multiple entries in the map. |
193 | */ | |
8d92db5c | 194 | ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
195 | if (unlikely(ret < 0)) |
196 | memset(dst, 0, size); | |
6ae08ae3 DB |
197 | return ret; |
198 | } | |
199 | ||
8d92db5c CH |
200 | BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, |
201 | const void __user *, unsafe_ptr) | |
202 | { | |
203 | return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); | |
204 | } | |
205 | ||
f470378c | 206 | const struct bpf_func_proto bpf_probe_read_user_str_proto = { |
6ae08ae3 DB |
207 | .func = bpf_probe_read_user_str, |
208 | .gpl_only = true, | |
209 | .ret_type = RET_INTEGER, | |
210 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
211 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
212 | .arg3_type = ARG_ANYTHING, | |
213 | }; | |
214 | ||
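The zero-fill and NUL guarantees discussed above matter most when the destination buffer is reused as a map key. A sketch under assumptions: the probe target and argument extraction are illustrative, and PT_REGS_PARM2() requires <bpf/bpf_tracing.h> plus a __TARGET_ARCH_* define at build time.

```c
/* Key a hash map with a user string read by bpf_probe_read_user_str().
 * Without the zeroed padding, semantically identical strings could land
 * in distinct map slots. */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, 64);
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 1024);
} counts SEC(".maps");

SEC("kprobe/do_sys_openat2")
int count_by_name(struct pt_regs *ctx)
{
	char key[64] = {};
	__u64 one = 1, *val;

	/* Second argument of do_sys_openat2() is the user filename. */
	if (bpf_probe_read_user_str(key, sizeof(key),
				    (const char *)PT_REGS_PARM2(ctx)) < 0)
		return 0;

	val = bpf_map_lookup_elem(&counts, key);
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, key, &one, BPF_ANY);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```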
215 | static __always_inline int | |
8d92db5c | 216 | bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) |
6ae08ae3 | 217 | { |
ff40e510 | 218 | int ret; |
9d1f8be5 | 219 | |
fe557319 | 220 | ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); |
074f528e | 221 | if (unlikely(ret < 0)) |
ff40e510 | 222 | memset(dst, 0, size); |
6ae08ae3 DB |
223 | return ret; |
224 | } | |
074f528e | 225 | |
6ae08ae3 DB |
226 | BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, |
227 | const void *, unsafe_ptr) | |
228 | { | |
8d92db5c | 229 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
230 | } |
231 | ||
f470378c | 232 | const struct bpf_func_proto bpf_probe_read_kernel_proto = { |
6ae08ae3 DB |
233 | .func = bpf_probe_read_kernel, |
234 | .gpl_only = true, | |
235 | .ret_type = RET_INTEGER, | |
236 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
237 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
238 | .arg3_type = ARG_ANYTHING, | |
239 | }; | |
240 | ||
6ae08ae3 | 241 | static __always_inline int |
8d92db5c | 242 | bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) |
6ae08ae3 | 243 | { |
ff40e510 | 244 | int ret; |
8d92db5c | 245 | |
6ae08ae3 | 246 | /* |
8d92db5c CH |
247 | * The strncpy_from_kernel_nofault() call will likely not fill the |
248 | * entire buffer, but that's okay in this circumstance as we're probing | |
6ae08ae3 DB |
249 | * arbitrary memory anyway, similar to bpf_probe_read_*(), and |
250 | * might as well probe the stack. Thus, memory is explicitly |
251 | * cleared only in the error case, so that improper users who |
252 | * ignore the return code altogether don't copy garbage; otherwise |
253 | * the length of the string is returned, usable for bpf_perf_event_output() et al. |
254 | */ | |
8d92db5c | 255 | ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); |
6ae08ae3 | 256 | if (unlikely(ret < 0)) |
ff40e510 | 257 | memset(dst, 0, size); |
074f528e | 258 | return ret; |
2541517c AS |
259 | } |
260 | ||
6ae08ae3 DB |
261 | BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, |
262 | const void *, unsafe_ptr) | |
263 | { | |
8d92db5c | 264 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
265 | } |
266 | ||
f470378c | 267 | const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { |
6ae08ae3 DB |
268 | .func = bpf_probe_read_kernel_str, |
269 | .gpl_only = true, | |
270 | .ret_type = RET_INTEGER, | |
271 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
272 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
273 | .arg3_type = ARG_ANYTHING, | |
274 | }; | |
275 | ||
8d92db5c CH |
276 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
277 | BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, | |
278 | const void *, unsafe_ptr) | |
279 | { | |
280 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { | |
281 | return bpf_probe_read_user_common(dst, size, | |
282 | (__force void __user *)unsafe_ptr); | |
283 | } | |
284 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); | |
285 | } | |
286 | ||
287 | static const struct bpf_func_proto bpf_probe_read_compat_proto = { | |
288 | .func = bpf_probe_read_compat, | |
289 | .gpl_only = true, | |
290 | .ret_type = RET_INTEGER, | |
291 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
292 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
293 | .arg3_type = ARG_ANYTHING, | |
294 | }; | |
295 | ||
6ae08ae3 DB |
296 | BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, |
297 | const void *, unsafe_ptr) | |
298 | { | |
8d92db5c CH |
299 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { |
300 | return bpf_probe_read_user_str_common(dst, size, | |
301 | (__force void __user *)unsafe_ptr); | |
302 | } | |
303 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); | |
6ae08ae3 DB |
304 | } |
305 | ||
306 | static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { | |
307 | .func = bpf_probe_read_compat_str, | |
2541517c AS |
308 | .gpl_only = true, |
309 | .ret_type = RET_INTEGER, | |
39f19ebb | 310 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
9c019e2b | 311 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
2541517c AS |
312 | .arg3_type = ARG_ANYTHING, |
313 | }; | |
8d92db5c | 314 | #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ |
2541517c | 315 | |
eb1b6688 | 316 | BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, |
f3694e00 | 317 | u32, size) |
96ae5227 | 318 | { |
96ae5227 SD |
319 | /* |
320 | * Ensure we're in a user context, which is safe for the helper |
321 | * to run. This helper has no business in a kthread. |
322 | * | |
323 | * access_ok() should prevent writing to non-user memory, but in | |
324 | * some situations (nommu, temporary switch, etc) access_ok() does | |
325 | * not provide enough validation, hence the check on KERNEL_DS. | |
c7b6f29b NA |
326 | * |
327 | * nmi_uaccess_okay() ensures the probe is not run in an interim | |
328 | * state, when the task or mm are switched. This is specifically | |
329 | * required to prevent the use of temporary mm. | |
96ae5227 SD |
330 | */ |
331 | ||
332 | if (unlikely(in_interrupt() || | |
333 | current->flags & (PF_KTHREAD | PF_EXITING))) | |
334 | return -EPERM; | |
db68ce10 | 335 | if (unlikely(uaccess_kernel())) |
96ae5227 | 336 | return -EPERM; |
c7b6f29b NA |
337 | if (unlikely(!nmi_uaccess_okay())) |
338 | return -EPERM; | |
96ae5227 | 339 | |
c0ee37e8 | 340 | return copy_to_user_nofault(unsafe_ptr, src, size); |
96ae5227 SD |
341 | } |
342 | ||
343 | static const struct bpf_func_proto bpf_probe_write_user_proto = { | |
344 | .func = bpf_probe_write_user, | |
345 | .gpl_only = true, | |
346 | .ret_type = RET_INTEGER, | |
347 | .arg1_type = ARG_ANYTHING, | |
39f19ebb AS |
348 | .arg2_type = ARG_PTR_TO_MEM, |
349 | .arg3_type = ARG_CONST_SIZE, | |
96ae5227 SD |
350 | }; |
351 | ||
352 | static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |
353 | { | |
2c78ee89 AS |
354 | if (!capable(CAP_SYS_ADMIN)) |
355 | return NULL; | |
356 | ||
96ae5227 SD |
357 | pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", |
358 | current->comm, task_pid_nr(current)); | |
359 | ||
360 | return &bpf_probe_write_user_proto; | |
361 | } | |
362 | ||
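A hedged sketch of the helper in use. Per bpf_get_probe_write_proto() above, loading such a program requires CAP_SYS_ADMIN and triggers the ratelimited warning naming the task; the target user address is assumed to be supplied by the loader, since picking one is outside the scope of this sketch.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

const volatile __u64 user_buf_addr; /* hypothetical, set by the loader */

SEC("kprobe/do_nanosleep")
int poke_user(struct pt_regs *ctx)
{
	char msg[] = "bpf";

	/* Fails with -EPERM from kthreads, irq context, etc., per the
	 * in_interrupt()/uaccess/nmi_uaccess_okay() checks above. */
	bpf_probe_write_user((void *)user_buf_addr, msg, sizeof(msg));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```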
ac5a72ea AM |
363 | static DEFINE_RAW_SPINLOCK(trace_printk_lock); |
364 | ||
d9c9e4db FR |
365 | #define MAX_TRACE_PRINTK_VARARGS 3 |
366 | #define BPF_TRACE_PRINTK_SIZE 1024 | |
ac5a72ea | 367 | |
d9c9e4db FR |
368 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
369 | u64, arg2, u64, arg3) | |
ac5a72ea | 370 | { |
d9c9e4db | 371 | u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; |
48cac3f4 | 372 | u32 *bin_args; |
ac5a72ea AM |
373 | static char buf[BPF_TRACE_PRINTK_SIZE]; |
374 | unsigned long flags; | |
ac5a72ea AM |
375 | int ret; |
376 | ||
48cac3f4 FR |
377 | ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, |
378 | MAX_TRACE_PRINTK_VARARGS); | |
d9c9e4db FR |
379 | if (ret < 0) |
380 | return ret; | |
381 | ||
38d26d89 | 382 | raw_spin_lock_irqsave(&trace_printk_lock, flags); |
48cac3f4 | 383 | ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); |
d9c9e4db | 384 | |
ac5a72ea AM |
385 | trace_bpf_trace_printk(buf); |
386 | raw_spin_unlock_irqrestore(&trace_printk_lock, flags); | |
387 | ||
48cac3f4 | 388 | bpf_bprintf_cleanup(); |
9c959c86 | 389 | |
d9c9e4db | 390 | return ret; |
9c959c86 AS |
391 | } |
392 | ||
393 | static const struct bpf_func_proto bpf_trace_printk_proto = { | |
394 | .func = bpf_trace_printk, | |
395 | .gpl_only = true, | |
396 | .ret_type = RET_INTEGER, | |
39f19ebb AS |
397 | .arg1_type = ARG_PTR_TO_MEM, |
398 | .arg2_type = ARG_CONST_SIZE, | |
9c959c86 AS |
399 | }; |
400 | ||
0756ea3e AS |
401 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void) |
402 | { | |
403 | /* | |
ac5a72ea AM |
404 | * This program might be calling bpf_trace_printk, |
405 | * so enable the associated bpf_trace/bpf_trace_printk event. | |
406 | * Repeat this each time, as it is possible a user has |
407 | * disabled bpf_trace_printk events. By loading a program |
408 | * calling bpf_trace_printk(), however, the user has expressed |
409 | * the intent to see such events. |
0756ea3e | 410 | */ |
ac5a72ea AM |
411 | if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) |
412 | pr_warn_ratelimited("could not enable bpf_trace_printk events"); | |
0756ea3e AS |
413 | |
414 | return &bpf_trace_printk_proto; | |
415 | } | |
416 | ||
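The MAX_TRACE_PRINTK_VARARGS limit above caps a single call at three u64 arguments; output lands in the bpf_trace/bpf_trace_printk event that bpf_get_trace_printk_proto() force-enables. A minimal sketch, with an illustrative probe target:

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_nanosleep")
int log_entry(struct pt_regs *ctx)
{
	const char fmt[] = "pid %d on cpu %d\n";

	/* At most three varargs per call (MAX_TRACE_PRINTK_VARARGS). */
	bpf_trace_printk(fmt, sizeof(fmt),
			 bpf_get_current_pid_tgid() >> 32,
			 bpf_get_smp_processor_id());
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```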
492e639f | 417 | #define MAX_SEQ_PRINTF_VARARGS 12 |
492e639f YS |
418 | |
419 | BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, | |
420 | const void *, data, u32, data_len) | |
421 | { | |
d9c9e4db | 422 | int err, num_args; |
48cac3f4 | 423 | u32 *bin_args; |
492e639f | 424 | |
d9c9e4db FR |
425 | if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 || |
426 | (data_len && !data)) | |
427 | return -EINVAL; | |
492e639f YS |
428 | num_args = data_len / 8; |
429 | ||
48cac3f4 | 430 | err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); |
d9c9e4db FR |
431 | if (err < 0) |
432 | return err; | |
492e639f | 433 | |
48cac3f4 FR |
434 | seq_bprintf(m, fmt, bin_args); |
435 | ||
436 | bpf_bprintf_cleanup(); | |
d9c9e4db FR |
437 | |
438 | return seq_has_overflowed(m) ? -EOVERFLOW : 0; | |
492e639f YS |
439 | } |
440 | ||
9436ef6e | 441 | BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) |
c9a0f3b8 | 442 | |
492e639f YS |
443 | static const struct bpf_func_proto bpf_seq_printf_proto = { |
444 | .func = bpf_seq_printf, | |
445 | .gpl_only = true, | |
446 | .ret_type = RET_INTEGER, | |
447 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 448 | .arg1_btf_id = &btf_seq_file_ids[0], |
492e639f YS |
449 | .arg2_type = ARG_PTR_TO_MEM, |
450 | .arg3_type = ARG_CONST_SIZE, | |
451 | .arg4_type = ARG_PTR_TO_MEM_OR_NULL, | |
452 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, | |
492e639f YS |
453 | }; |
454 | ||
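bpf_seq_printf() is only handed out to programs with expected_attach_type == BPF_TRACE_ITER (see tracing_prog_func_proto() later in this file). A sketch of a task iterator using it through libbpf's BPF_SEQ_PRINTF convenience macro; a generated vmlinux.h is assumed.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;

	if (!task)
		return 0;

	/* BPF_SEQ_PRINTF packs the varargs the way bpf_seq_printf expects. */
	BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```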
455 | BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) | |
456 | { | |
457 | return seq_write(m, data, len) ? -EOVERFLOW : 0; | |
458 | } | |
459 | ||
492e639f YS |
460 | static const struct bpf_func_proto bpf_seq_write_proto = { |
461 | .func = bpf_seq_write, | |
462 | .gpl_only = true, | |
463 | .ret_type = RET_INTEGER, | |
464 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 465 | .arg1_btf_id = &btf_seq_file_ids[0], |
492e639f YS |
466 | .arg2_type = ARG_PTR_TO_MEM, |
467 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
492e639f YS |
468 | }; |
469 | ||
eb411377 AM |
470 | BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, |
471 | u32, btf_ptr_size, u64, flags) | |
472 | { | |
473 | const struct btf *btf; | |
474 | s32 btf_id; | |
475 | int ret; | |
476 | ||
477 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
478 | if (ret) | |
479 | return ret; | |
480 | ||
481 | return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); | |
482 | } | |
483 | ||
484 | static const struct bpf_func_proto bpf_seq_printf_btf_proto = { | |
485 | .func = bpf_seq_printf_btf, | |
486 | .gpl_only = true, | |
487 | .ret_type = RET_INTEGER, | |
488 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
489 | .arg1_btf_id = &btf_seq_file_ids[0], | |
492e639f YS |
490 | .arg2_type = ARG_PTR_TO_MEM, |
491 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
eb411377 | 492 | .arg4_type = ARG_ANYTHING, |
492e639f YS |
493 | }; |
494 | ||
908432ca YS |
495 | static __always_inline int |
496 | get_map_perf_counter(struct bpf_map *map, u64 flags, | |
497 | u64 *value, u64 *enabled, u64 *running) | |
35578d79 | 498 | { |
35578d79 | 499 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
6816a7ff DB |
500 | unsigned int cpu = smp_processor_id(); |
501 | u64 index = flags & BPF_F_INDEX_MASK; | |
3b1efb19 | 502 | struct bpf_event_entry *ee; |
35578d79 | 503 | |
6816a7ff DB |
504 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
505 | return -EINVAL; | |
506 | if (index == BPF_F_CURRENT_CPU) | |
507 | index = cpu; | |
35578d79 KX |
508 | if (unlikely(index >= array->map.max_entries)) |
509 | return -E2BIG; | |
510 | ||
3b1efb19 | 511 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 512 | if (!ee) |
35578d79 KX |
513 | return -ENOENT; |
514 | ||
908432ca YS |
515 | return perf_event_read_local(ee->event, value, enabled, running); |
516 | } | |
517 | ||
518 | BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |
519 | { | |
520 | u64 value = 0; | |
521 | int err; | |
522 | ||
523 | err = get_map_perf_counter(map, flags, &value, NULL, NULL); | |
35578d79 | 524 | /* |
f91840a3 AS |
525 | * this api is ugly since we miss [-22..-2] range of valid |
526 | * counter values, but that's uapi | |
35578d79 | 527 | */ |
f91840a3 AS |
528 | if (err) |
529 | return err; | |
530 | return value; | |
35578d79 KX |
531 | } |
532 | ||
62544ce8 | 533 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
35578d79 | 534 | .func = bpf_perf_event_read, |
1075ef59 | 535 | .gpl_only = true, |
35578d79 KX |
536 | .ret_type = RET_INTEGER, |
537 | .arg1_type = ARG_CONST_MAP_PTR, | |
538 | .arg2_type = ARG_ANYTHING, | |
539 | }; | |
540 | ||
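A sketch of reading a counter through a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot, mirroring get_map_perf_counter()'s index handling above. It assumes the loader has populated the map with perf event FDs; map sizing and the probe target are illustrative.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 128); /* assumed >= number of possible CPUs */
} counters SEC(".maps");

SEC("kprobe/do_nanosleep")
int read_counter(struct pt_regs *ctx)
{
	/* BPF_F_CURRENT_CPU resolves to this CPU's slot, as in the helper. */
	__s64 val = (__s64)bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	/* Per the comment above, values in [-22..-2] are ambiguous: they
	 * can be either errors or legitimate counter values. */
	if (val >= 0)
		bpf_printk("counter: %lld", val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```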
908432ca YS |
541 | BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, |
542 | struct bpf_perf_event_value *, buf, u32, size) | |
543 | { | |
544 | int err = -EINVAL; | |
545 | ||
546 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
547 | goto clear; | |
548 | err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, | |
549 | &buf->running); | |
550 | if (unlikely(err)) | |
551 | goto clear; | |
552 | return 0; | |
553 | clear: | |
554 | memset(buf, 0, size); | |
555 | return err; | |
556 | } | |
557 | ||
558 | static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |
559 | .func = bpf_perf_event_read_value, | |
560 | .gpl_only = true, | |
561 | .ret_type = RET_INTEGER, | |
562 | .arg1_type = ARG_CONST_MAP_PTR, | |
563 | .arg2_type = ARG_ANYTHING, | |
564 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
565 | .arg4_type = ARG_CONST_SIZE, | |
566 | }; | |
567 | ||
8e7a3920 DB |
568 | static __always_inline u64 |
569 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |
283ca526 | 570 | u64 flags, struct perf_sample_data *sd) |
a43eec30 | 571 | { |
a43eec30 | 572 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
d7931330 | 573 | unsigned int cpu = smp_processor_id(); |
1e33759c | 574 | u64 index = flags & BPF_F_INDEX_MASK; |
3b1efb19 | 575 | struct bpf_event_entry *ee; |
a43eec30 | 576 | struct perf_event *event; |
a43eec30 | 577 | |
1e33759c | 578 | if (index == BPF_F_CURRENT_CPU) |
d7931330 | 579 | index = cpu; |
a43eec30 AS |
580 | if (unlikely(index >= array->map.max_entries)) |
581 | return -E2BIG; | |
582 | ||
3b1efb19 | 583 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 584 | if (!ee) |
a43eec30 AS |
585 | return -ENOENT; |
586 | ||
3b1efb19 | 587 | event = ee->event; |
a43eec30 AS |
588 | if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || |
589 | event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) | |
590 | return -EINVAL; | |
591 | ||
d7931330 | 592 | if (unlikely(event->oncpu != cpu)) |
a43eec30 AS |
593 | return -EOPNOTSUPP; |
594 | ||
56201969 | 595 | return perf_event_output(event, sd, regs); |
a43eec30 AS |
596 | } |
597 | ||
9594dc3c MM |
598 | /* |
599 | * Support tracepoints that call bpf_perf_event_output from |
600 | * normal, irq, and nmi context. |
601 | */ | |
602 | struct bpf_trace_sample_data { | |
603 | struct perf_sample_data sds[3]; | |
604 | }; | |
605 | ||
606 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); | |
607 | static DEFINE_PER_CPU(int, bpf_trace_nest_level); | |
f3694e00 DB |
608 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
609 | u64, flags, void *, data, u64, size) | |
8e7a3920 | 610 | { |
9594dc3c MM |
611 | struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); |
612 | int nest_level = this_cpu_inc_return(bpf_trace_nest_level); | |
8e7a3920 DB |
613 | struct perf_raw_record raw = { |
614 | .frag = { | |
615 | .size = size, | |
616 | .data = data, | |
617 | }, | |
618 | }; | |
9594dc3c MM |
619 | struct perf_sample_data *sd; |
620 | int err; | |
8e7a3920 | 621 | |
9594dc3c MM |
622 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { |
623 | err = -EBUSY; | |
624 | goto out; | |
625 | } | |
626 | ||
627 | sd = &sds->sds[nest_level - 1]; | |
628 | ||
629 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { | |
630 | err = -EINVAL; | |
631 | goto out; | |
632 | } | |
8e7a3920 | 633 | |
283ca526 DB |
634 | perf_sample_data_init(sd, 0, 0); |
635 | sd->raw = &raw; | |
636 | ||
9594dc3c MM |
637 | err = __bpf_perf_event_output(regs, map, flags, sd); |
638 | ||
639 | out: | |
640 | this_cpu_dec(bpf_trace_nest_level); | |
641 | return err; | |
8e7a3920 DB |
642 | } |
643 | ||
a43eec30 AS |
644 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
645 | .func = bpf_perf_event_output, | |
1075ef59 | 646 | .gpl_only = true, |
a43eec30 AS |
647 | .ret_type = RET_INTEGER, |
648 | .arg1_type = ARG_PTR_TO_CTX, | |
649 | .arg2_type = ARG_CONST_MAP_PTR, | |
650 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 651 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 652 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
a43eec30 AS |
653 | }; |
654 | ||
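A sketch of the usual producer side of this helper: emitting a fixed-size record into a BPF_MAP_TYPE_PERF_EVENT_ARRAY, with BPF_F_CURRENT_CPU selecting this CPU's ring exactly as __bpf_perf_event_output() resolves the index above. The event layout and probe target are made up.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	__u64 ts;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 128); /* assumed >= number of possible CPUs */
} events SEC(".maps");

SEC("kprobe/do_sys_openat2")
int emit_open(struct pt_regs *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.ts  = bpf_ktime_get_ns(),
	};

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```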
768fb61f AZ |
655 | static DEFINE_PER_CPU(int, bpf_event_output_nest_level); |
656 | struct bpf_nested_pt_regs { | |
657 | struct pt_regs regs[3]; | |
658 | }; | |
659 | static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); | |
660 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); | |
bd570ff9 | 661 | |
555c8a86 DB |
662 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
663 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | |
bd570ff9 | 664 | { |
768fb61f | 665 | int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); |
555c8a86 DB |
666 | struct perf_raw_frag frag = { |
667 | .copy = ctx_copy, | |
668 | .size = ctx_size, | |
669 | .data = ctx, | |
670 | }; | |
671 | struct perf_raw_record raw = { | |
672 | .frag = { | |
183fc153 AM |
673 | { |
674 | .next = ctx_size ? &frag : NULL, | |
675 | }, | |
555c8a86 DB |
676 | .size = meta_size, |
677 | .data = meta, | |
678 | }, | |
679 | }; | |
768fb61f AZ |
680 | struct perf_sample_data *sd; |
681 | struct pt_regs *regs; | |
682 | u64 ret; | |
683 | ||
684 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { | |
685 | ret = -EBUSY; | |
686 | goto out; | |
687 | } | |
688 | sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); | |
689 | regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); | |
bd570ff9 DB |
690 | |
691 | perf_fetch_caller_regs(regs); | |
283ca526 DB |
692 | perf_sample_data_init(sd, 0, 0); |
693 | sd->raw = &raw; | |
bd570ff9 | 694 | |
768fb61f AZ |
695 | ret = __bpf_perf_event_output(regs, map, flags, sd); |
696 | out: | |
697 | this_cpu_dec(bpf_event_output_nest_level); | |
698 | return ret; | |
bd570ff9 DB |
699 | } |
700 | ||
f3694e00 | 701 | BPF_CALL_0(bpf_get_current_task) |
606274c5 AS |
702 | { |
703 | return (long) current; | |
704 | } | |
705 | ||
f470378c | 706 | const struct bpf_func_proto bpf_get_current_task_proto = { |
606274c5 AS |
707 | .func = bpf_get_current_task, |
708 | .gpl_only = true, | |
709 | .ret_type = RET_INTEGER, | |
710 | }; | |
711 | ||
3ca1032a KS |
712 | BPF_CALL_0(bpf_get_current_task_btf) |
713 | { | |
714 | return (unsigned long) current; | |
715 | } | |
716 | ||
717 | BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct) | |
718 | ||
719 | static const struct bpf_func_proto bpf_get_current_task_btf_proto = { | |
720 | .func = bpf_get_current_task_btf, | |
721 | .gpl_only = true, | |
722 | .ret_type = RET_PTR_TO_BTF_ID, | |
723 | .ret_btf_id = &bpf_get_current_btf_ids[0], | |
724 | }; | |
725 | ||
f3694e00 | 726 | BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) |
60d20f91 | 727 | { |
60d20f91 SD |
728 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
729 | struct cgroup *cgrp; | |
60d20f91 | 730 | |
60d20f91 SD |
731 | if (unlikely(idx >= array->map.max_entries)) |
732 | return -E2BIG; | |
733 | ||
734 | cgrp = READ_ONCE(array->ptrs[idx]); | |
735 | if (unlikely(!cgrp)) | |
736 | return -EAGAIN; | |
737 | ||
738 | return task_under_cgroup_hierarchy(current, cgrp); | |
739 | } | |
740 | ||
741 | static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { | |
742 | .func = bpf_current_task_under_cgroup, | |
743 | .gpl_only = false, | |
744 | .ret_type = RET_INTEGER, | |
745 | .arg1_type = ARG_CONST_MAP_PTR, | |
746 | .arg2_type = ARG_ANYTHING, | |
747 | }; | |
748 | ||
8b401f9e YS |
749 | struct send_signal_irq_work { |
750 | struct irq_work irq_work; | |
751 | struct task_struct *task; | |
752 | u32 sig; | |
8482941f | 753 | enum pid_type type; |
8b401f9e YS |
754 | }; |
755 | ||
756 | static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); | |
757 | ||
758 | static void do_bpf_send_signal(struct irq_work *entry) | |
759 | { | |
760 | struct send_signal_irq_work *work; | |
761 | ||
762 | work = container_of(entry, struct send_signal_irq_work, irq_work); | |
8482941f | 763 | group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); |
8b401f9e YS |
764 | } |
765 | ||
8482941f | 766 | static int bpf_send_signal_common(u32 sig, enum pid_type type) |
8b401f9e YS |
767 | { |
768 | struct send_signal_irq_work *work = NULL; | |
769 | ||
770 | /* Similar to bpf_probe_write_user, the task needs to be |
771 | * in a sound condition and kernel memory access must be |
772 | * permitted in order to send a signal to the current |
773 | * task. |
774 | */ | |
775 | if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) | |
776 | return -EPERM; | |
777 | if (unlikely(uaccess_kernel())) | |
778 | return -EPERM; | |
779 | if (unlikely(!nmi_uaccess_okay())) | |
780 | return -EPERM; | |
781 | ||
1bc7896e | 782 | if (irqs_disabled()) { |
e1afb702 YS |
783 | /* Do an early check on signal validity. Otherwise, |
784 | * the error is lost in deferred irq_work. | |
785 | */ | |
786 | if (unlikely(!valid_signal(sig))) | |
787 | return -EINVAL; | |
788 | ||
8b401f9e | 789 | work = this_cpu_ptr(&send_signal_work); |
7a9f50a0 | 790 | if (irq_work_is_busy(&work->irq_work)) |
8b401f9e YS |
791 | return -EBUSY; |
792 | ||
793 | /* Add the current task, which is the target of the signal, |
794 | * to the irq_work. The current task may have changed by the |
795 | * time queued irq works get executed. |
796 | */ | |
797 | work->task = current; | |
798 | work->sig = sig; | |
8482941f | 799 | work->type = type; |
8b401f9e YS |
800 | irq_work_queue(&work->irq_work); |
801 | return 0; | |
802 | } | |
803 | ||
8482941f YS |
804 | return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); |
805 | } | |
806 | ||
807 | BPF_CALL_1(bpf_send_signal, u32, sig) | |
808 | { | |
809 | return bpf_send_signal_common(sig, PIDTYPE_TGID); | |
8b401f9e YS |
810 | } |
811 | ||
812 | static const struct bpf_func_proto bpf_send_signal_proto = { | |
813 | .func = bpf_send_signal, | |
814 | .gpl_only = false, | |
815 | .ret_type = RET_INTEGER, | |
816 | .arg1_type = ARG_ANYTHING, | |
817 | }; | |
818 | ||
8482941f YS |
819 | BPF_CALL_1(bpf_send_signal_thread, u32, sig) |
820 | { | |
821 | return bpf_send_signal_common(sig, PIDTYPE_PID); | |
822 | } | |
823 | ||
824 | static const struct bpf_func_proto bpf_send_signal_thread_proto = { | |
825 | .func = bpf_send_signal_thread, | |
826 | .gpl_only = false, | |
827 | .ret_type = RET_INTEGER, | |
828 | .arg1_type = ARG_ANYTHING, | |
829 | }; | |
830 | ||
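A sketch of the two variants wired up above: bpf_send_signal() targets the whole thread group (PIDTYPE_TGID), while bpf_send_signal_thread() targets only the current thread (PIDTYPE_PID). Signal number and probe target are illustrative.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_nanosleep")
int nudge(struct pt_regs *ctx)
{
	/* 10 == SIGUSR1. May return -EBUSY if the per-cpu irq_work that
	 * defers delivery from irqs-disabled context is still in flight. */
	bpf_send_signal(10);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```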
6e22ab9d JO |
831 | BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) |
832 | { | |
833 | long len; | |
834 | char *p; | |
835 | ||
836 | if (!sz) | |
837 | return 0; | |
838 | ||
839 | p = d_path(path, buf, sz); | |
840 | if (IS_ERR(p)) { | |
841 | len = PTR_ERR(p); | |
842 | } else { | |
843 | len = buf + sz - p; | |
844 | memmove(buf, p, len); | |
845 | } | |
846 | ||
847 | return len; | |
848 | } | |
849 | ||
850 | BTF_SET_START(btf_allowlist_d_path) | |
a8a71796 JO |
851 | #ifdef CONFIG_SECURITY |
852 | BTF_ID(func, security_file_permission) | |
853 | BTF_ID(func, security_inode_getattr) | |
854 | BTF_ID(func, security_file_open) | |
855 | #endif | |
856 | #ifdef CONFIG_SECURITY_PATH | |
857 | BTF_ID(func, security_path_truncate) | |
858 | #endif | |
6e22ab9d JO |
859 | BTF_ID(func, vfs_truncate) |
860 | BTF_ID(func, vfs_fallocate) | |
861 | BTF_ID(func, dentry_open) | |
862 | BTF_ID(func, vfs_getattr) | |
863 | BTF_ID(func, filp_close) | |
864 | BTF_SET_END(btf_allowlist_d_path) | |
865 | ||
866 | static bool bpf_d_path_allowed(const struct bpf_prog *prog) | |
867 | { | |
3d06f34a SL |
868 | if (prog->type == BPF_PROG_TYPE_TRACING && |
869 | prog->expected_attach_type == BPF_TRACE_ITER) | |
870 | return true; | |
871 | ||
6f100640 KS |
872 | if (prog->type == BPF_PROG_TYPE_LSM) |
873 | return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); | |
874 | ||
875 | return btf_id_set_contains(&btf_allowlist_d_path, | |
876 | prog->aux->attach_btf_id); | |
6e22ab9d JO |
877 | } |
878 | ||
9436ef6e | 879 | BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) |
6e22ab9d JO |
880 | |
881 | static const struct bpf_func_proto bpf_d_path_proto = { | |
882 | .func = bpf_d_path, | |
883 | .gpl_only = false, | |
884 | .ret_type = RET_INTEGER, | |
885 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 886 | .arg1_btf_id = &bpf_d_path_btf_ids[0], |
6e22ab9d JO |
887 | .arg2_type = ARG_PTR_TO_MEM, |
888 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
6e22ab9d JO |
889 | .allowed = bpf_d_path_allowed, |
890 | }; | |
891 | ||
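A sketch of bpf_d_path() from one of the allowlisted attach points above (security_file_open is in btf_allowlist_d_path when CONFIG_SECURITY=y). It uses an fentry program with libbpf's BPF_PROG macro and assumes a generated vmlinux.h.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/security_file_open")
int BPF_PROG(trace_open, struct file *file)
{
	char buf[256];
	long len;

	len = bpf_d_path(&file->f_path, buf, sizeof(buf));
	if (len > 0)
		bpf_printk("opened: %s", buf);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```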
c4d0bfb4 AM |
892 | #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ |
893 | BTF_F_PTR_RAW | BTF_F_ZERO) | |
894 | ||
895 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, | |
896 | u64 flags, const struct btf **btf, | |
897 | s32 *btf_id) | |
898 | { | |
899 | const struct btf_type *t; | |
900 | ||
901 | if (unlikely(flags & ~(BTF_F_ALL))) | |
902 | return -EINVAL; | |
903 | ||
904 | if (btf_ptr_size != sizeof(struct btf_ptr)) | |
905 | return -EINVAL; | |
906 | ||
907 | *btf = bpf_get_btf_vmlinux(); | |
908 | ||
909 | if (IS_ERR_OR_NULL(*btf)) | |
abbaa433 | 910 | return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; |
c4d0bfb4 AM |
911 | |
912 | if (ptr->type_id > 0) | |
913 | *btf_id = ptr->type_id; | |
914 | else | |
915 | return -EINVAL; | |
916 | ||
917 | if (*btf_id > 0) | |
918 | t = btf_type_by_id(*btf, *btf_id); | |
919 | if (*btf_id <= 0 || !t) | |
920 | return -ENOENT; | |
921 | ||
922 | return 0; | |
923 | } | |
924 | ||
925 | BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, | |
926 | u32, btf_ptr_size, u64, flags) | |
927 | { | |
928 | const struct btf *btf; | |
929 | s32 btf_id; | |
930 | int ret; | |
931 | ||
932 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
933 | if (ret) | |
934 | return ret; | |
935 | ||
936 | return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, | |
937 | flags); | |
938 | } | |
939 | ||
940 | const struct bpf_func_proto bpf_snprintf_btf_proto = { | |
941 | .func = bpf_snprintf_btf, | |
942 | .gpl_only = false, | |
943 | .ret_type = RET_INTEGER, | |
944 | .arg1_type = ARG_PTR_TO_MEM, | |
945 | .arg2_type = ARG_CONST_SIZE, | |
946 | .arg3_type = ARG_PTR_TO_MEM, | |
947 | .arg4_type = ARG_CONST_SIZE, | |
948 | .arg5_type = ARG_ANYTHING, | |
949 | }; | |
950 | ||
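A sketch of bpf_snprintf_btf() pretty-printing the current task. The BTF type id is resolved at load time with libbpf's bpf_core_type_id_kernel(), which needs a reasonably recent clang; all names besides the helpers are illustrative.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("kprobe/do_nanosleep")
int show_task(struct pt_regs *ctx)
{
	struct btf_ptr p = {};
	char out[128];

	p.ptr = bpf_get_current_task_btf();
	p.type_id = bpf_core_type_id_kernel(struct task_struct);

	/* btf_ptr_size must be sizeof(struct btf_ptr), and flags must stay
	 * within BTF_F_ALL, per bpf_btf_printf_prepare() above. */
	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
	bpf_printk("%s", out);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```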
fc611f47 KS |
951 | const struct bpf_func_proto * |
952 | bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
2541517c AS |
953 | { |
954 | switch (func_id) { | |
955 | case BPF_FUNC_map_lookup_elem: | |
956 | return &bpf_map_lookup_elem_proto; | |
957 | case BPF_FUNC_map_update_elem: | |
958 | return &bpf_map_update_elem_proto; | |
959 | case BPF_FUNC_map_delete_elem: | |
960 | return &bpf_map_delete_elem_proto; | |
02a8c817 AC |
961 | case BPF_FUNC_map_push_elem: |
962 | return &bpf_map_push_elem_proto; | |
963 | case BPF_FUNC_map_pop_elem: | |
964 | return &bpf_map_pop_elem_proto; | |
965 | case BPF_FUNC_map_peek_elem: | |
966 | return &bpf_map_peek_elem_proto; | |
d9847d31 AS |
967 | case BPF_FUNC_ktime_get_ns: |
968 | return &bpf_ktime_get_ns_proto; | |
71d19214 MÅ» |
969 | case BPF_FUNC_ktime_get_boot_ns: |
970 | return &bpf_ktime_get_boot_ns_proto; | |
d0551261 DB |
971 | case BPF_FUNC_ktime_get_coarse_ns: |
972 | return &bpf_ktime_get_coarse_ns_proto; | |
04fd61ab AS |
973 | case BPF_FUNC_tail_call: |
974 | return &bpf_tail_call_proto; | |
ffeedafb AS |
975 | case BPF_FUNC_get_current_pid_tgid: |
976 | return &bpf_get_current_pid_tgid_proto; | |
606274c5 AS |
977 | case BPF_FUNC_get_current_task: |
978 | return &bpf_get_current_task_proto; | |
3ca1032a KS |
979 | case BPF_FUNC_get_current_task_btf: |
980 | return &bpf_get_current_task_btf_proto; | |
ffeedafb AS |
981 | case BPF_FUNC_get_current_uid_gid: |
982 | return &bpf_get_current_uid_gid_proto; | |
983 | case BPF_FUNC_get_current_comm: | |
984 | return &bpf_get_current_comm_proto; | |
9c959c86 | 985 | case BPF_FUNC_trace_printk: |
0756ea3e | 986 | return bpf_get_trace_printk_proto(); |
ab1973d3 AS |
987 | case BPF_FUNC_get_smp_processor_id: |
988 | return &bpf_get_smp_processor_id_proto; | |
2d0e30c3 DB |
989 | case BPF_FUNC_get_numa_node_id: |
990 | return &bpf_get_numa_node_id_proto; | |
35578d79 KX |
991 | case BPF_FUNC_perf_event_read: |
992 | return &bpf_perf_event_read_proto; | |
96ae5227 SD |
993 | case BPF_FUNC_probe_write_user: |
994 | return bpf_get_probe_write_proto(); | |
60d20f91 SD |
995 | case BPF_FUNC_current_task_under_cgroup: |
996 | return &bpf_current_task_under_cgroup_proto; | |
8937bd80 AS |
997 | case BPF_FUNC_get_prandom_u32: |
998 | return &bpf_get_prandom_u32_proto; | |
6ae08ae3 DB |
999 | case BPF_FUNC_probe_read_user: |
1000 | return &bpf_probe_read_user_proto; | |
1001 | case BPF_FUNC_probe_read_kernel: | |
ff40e510 DB |
1002 | return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
1003 | NULL : &bpf_probe_read_kernel_proto; | |
6ae08ae3 DB |
1004 | case BPF_FUNC_probe_read_user_str: |
1005 | return &bpf_probe_read_user_str_proto; | |
1006 | case BPF_FUNC_probe_read_kernel_str: | |
ff40e510 DB |
1007 | return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
1008 | NULL : &bpf_probe_read_kernel_str_proto; | |
0ebeea8c DB |
1009 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
1010 | case BPF_FUNC_probe_read: | |
ff40e510 DB |
1011 | return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
1012 | NULL : &bpf_probe_read_compat_proto; | |
a5e8c070 | 1013 | case BPF_FUNC_probe_read_str: |
ff40e510 DB |
1014 | return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? |
1015 | NULL : &bpf_probe_read_compat_str_proto; | |
0ebeea8c | 1016 | #endif |
34ea38ca | 1017 | #ifdef CONFIG_CGROUPS |
bf6fa2c8 YS |
1018 | case BPF_FUNC_get_current_cgroup_id: |
1019 | return &bpf_get_current_cgroup_id_proto; | |
34ea38ca | 1020 | #endif |
8b401f9e YS |
1021 | case BPF_FUNC_send_signal: |
1022 | return &bpf_send_signal_proto; | |
8482941f YS |
1023 | case BPF_FUNC_send_signal_thread: |
1024 | return &bpf_send_signal_thread_proto; | |
b80b033b SL |
1025 | case BPF_FUNC_perf_event_read_value: |
1026 | return &bpf_perf_event_read_value_proto; | |
b4490c5c CN |
1027 | case BPF_FUNC_get_ns_current_pid_tgid: |
1028 | return &bpf_get_ns_current_pid_tgid_proto; | |
457f4436 AN |
1029 | case BPF_FUNC_ringbuf_output: |
1030 | return &bpf_ringbuf_output_proto; | |
1031 | case BPF_FUNC_ringbuf_reserve: | |
1032 | return &bpf_ringbuf_reserve_proto; | |
1033 | case BPF_FUNC_ringbuf_submit: | |
1034 | return &bpf_ringbuf_submit_proto; | |
1035 | case BPF_FUNC_ringbuf_discard: | |
1036 | return &bpf_ringbuf_discard_proto; | |
1037 | case BPF_FUNC_ringbuf_query: | |
1038 | return &bpf_ringbuf_query_proto; | |
72e2b2b6 YS |
1039 | case BPF_FUNC_jiffies64: |
1040 | return &bpf_jiffies64_proto; | |
fa28dcb8 SL |
1041 | case BPF_FUNC_get_task_stack: |
1042 | return &bpf_get_task_stack_proto; | |
07be4c4a AS |
1043 | case BPF_FUNC_copy_from_user: |
1044 | return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; | |
c4d0bfb4 AM |
1045 | case BPF_FUNC_snprintf_btf: |
1046 | return &bpf_snprintf_btf_proto; | |
b7906b70 | 1047 | case BPF_FUNC_per_cpu_ptr: |
eaa6bcb7 | 1048 | return &bpf_per_cpu_ptr_proto; |
b7906b70 | 1049 | case BPF_FUNC_this_cpu_ptr: |
63d9b80d | 1050 | return &bpf_this_cpu_ptr_proto; |
a10787e6 SL |
1051 | case BPF_FUNC_task_storage_get: |
1052 | return &bpf_task_storage_get_proto; | |
1053 | case BPF_FUNC_task_storage_delete: | |
1054 | return &bpf_task_storage_delete_proto; | |
69c087ba YS |
1055 | case BPF_FUNC_for_each_map_elem: |
1056 | return &bpf_for_each_map_elem_proto; | |
7b15523a FR |
1057 | case BPF_FUNC_snprintf: |
1058 | return &bpf_snprintf_proto; | |
9fd82b61 AS |
1059 | default: |
1060 | return NULL; | |
1061 | } | |
1062 | } | |
1063 | ||
5e43f899 AI |
1064 | static const struct bpf_func_proto * |
1065 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1066 | { |
1067 | switch (func_id) { | |
a43eec30 AS |
1068 | case BPF_FUNC_perf_event_output: |
1069 | return &bpf_perf_event_output_proto; | |
d5a3b1f6 AS |
1070 | case BPF_FUNC_get_stackid: |
1071 | return &bpf_get_stackid_proto; | |
c195651e YS |
1072 | case BPF_FUNC_get_stack: |
1073 | return &bpf_get_stack_proto; | |
9802d865 JB |
1074 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
1075 | case BPF_FUNC_override_return: | |
1076 | return &bpf_override_return_proto; | |
1077 | #endif | |
2541517c | 1078 | default: |
fc611f47 | 1079 | return bpf_tracing_func_proto(func_id, prog); |
2541517c AS |
1080 | } |
1081 | } | |
1082 | ||
1083 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | |
19de99f7 | 1084 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 1085 | const struct bpf_prog *prog, |
23994631 | 1086 | struct bpf_insn_access_aux *info) |
2541517c | 1087 | { |
2541517c AS |
1088 | if (off < 0 || off >= sizeof(struct pt_regs)) |
1089 | return false; | |
2541517c AS |
1090 | if (type != BPF_READ) |
1091 | return false; | |
2541517c AS |
1092 | if (off % size != 0) |
1093 | return false; | |
2d071c64 DB |
1094 | /* |
1095 | * Assertion for 32 bit to make sure last 8 byte access | |
1096 | * (BPF_DW) to the last 4 byte member is disallowed. | |
1097 | */ | |
1098 | if (off + size > sizeof(struct pt_regs)) | |
1099 | return false; | |
1100 | ||
2541517c AS |
1101 | return true; |
1102 | } | |
1103 | ||
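An illustration of what the check above admits: kprobe programs may only read, within sizeof(struct pt_regs) and at naturally aligned offsets. PT_REGS_IP() needs <bpf/bpf_tracing.h> and a __TARGET_ARCH_* define at build time.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_nanosleep")
int read_regs(struct pt_regs *ctx)
{
	/* A naturally aligned, in-bounds load from pt_regs: exactly the
	 * kind of BPF_READ access kprobe_prog_is_valid_access() allows. */
	bpf_printk("probed ip: %lx", PT_REGS_IP(ctx));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```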
7de16e3a | 1104 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
2541517c AS |
1105 | .get_func_proto = kprobe_prog_func_proto, |
1106 | .is_valid_access = kprobe_prog_is_valid_access, | |
1107 | }; | |
1108 | ||
7de16e3a JK |
1109 | const struct bpf_prog_ops kprobe_prog_ops = { |
1110 | }; | |
1111 | ||
f3694e00 DB |
1112 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
1113 | u64, flags, void *, data, u64, size) | |
9940d67c | 1114 | { |
f3694e00 DB |
1115 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1116 | ||
9940d67c AS |
1117 | /* |
1118 | * r1 points to the perf tracepoint buffer, whose first 8 bytes are hidden |
1119 | * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it |
f3694e00 | 1120 | * from there and call the same bpf_perf_event_output() helper inline. |
9940d67c | 1121 | */ |
f3694e00 | 1122 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
9940d67c AS |
1123 | } |
1124 | ||
1125 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { | |
1126 | .func = bpf_perf_event_output_tp, | |
1127 | .gpl_only = true, | |
1128 | .ret_type = RET_INTEGER, | |
1129 | .arg1_type = ARG_PTR_TO_CTX, | |
1130 | .arg2_type = ARG_CONST_MAP_PTR, | |
1131 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 1132 | .arg4_type = ARG_PTR_TO_MEM, |
a60dd35d | 1133 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
9940d67c AS |
1134 | }; |
1135 | ||
f3694e00 DB |
1136 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
1137 | u64, flags) | |
9940d67c | 1138 | { |
f3694e00 | 1139 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
9940d67c | 1140 | |
f3694e00 DB |
1141 | /* |
1142 | * Same comment as in bpf_perf_event_output_tp(), only that this time | |
1143 | * the other helper's function body cannot be inlined due to being | |
1144 | * external; thus we need to call the raw helper function. |
1145 | */ | |
1146 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
1147 | flags, 0, 0); | |
9940d67c AS |
1148 | } |
1149 | ||
1150 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { | |
1151 | .func = bpf_get_stackid_tp, | |
1152 | .gpl_only = true, | |
1153 | .ret_type = RET_INTEGER, | |
1154 | .arg1_type = ARG_PTR_TO_CTX, | |
1155 | .arg2_type = ARG_CONST_MAP_PTR, | |
1156 | .arg3_type = ARG_ANYTHING, | |
1157 | }; | |
1158 | ||
c195651e YS |
1159 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
1160 | u64, flags) | |
1161 | { | |
1162 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; | |
1163 | ||
1164 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
1165 | (unsigned long) size, flags, 0); | |
1166 | } | |
1167 | ||
1168 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { | |
1169 | .func = bpf_get_stack_tp, | |
1170 | .gpl_only = true, | |
1171 | .ret_type = RET_INTEGER, | |
1172 | .arg1_type = ARG_PTR_TO_CTX, | |
1173 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1174 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1175 | .arg4_type = ARG_ANYTHING, | |
1176 | }; | |
1177 | ||
5e43f899 AI |
1178 | static const struct bpf_func_proto * |
1179 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
f005afed YS |
1180 | { |
1181 | switch (func_id) { | |
1182 | case BPF_FUNC_perf_event_output: | |
1183 | return &bpf_perf_event_output_proto_tp; | |
1184 | case BPF_FUNC_get_stackid: | |
1185 | return &bpf_get_stackid_proto_tp; | |
c195651e YS |
1186 | case BPF_FUNC_get_stack: |
1187 | return &bpf_get_stack_proto_tp; | |
f005afed | 1188 | default: |
fc611f47 | 1189 | return bpf_tracing_func_proto(func_id, prog); |
f005afed YS |
1190 | } |
1191 | } | |
1192 | ||
1193 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | |
5e43f899 | 1194 | const struct bpf_prog *prog, |
f005afed YS |
1195 | struct bpf_insn_access_aux *info) |
1196 | { | |
1197 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | |
1198 | return false; | |
1199 | if (type != BPF_READ) | |
1200 | return false; | |
1201 | if (off % size != 0) | |
1202 | return false; | |
1203 | ||
1204 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); | |
1205 | return true; | |
1206 | } | |
1207 | ||
1208 | const struct bpf_verifier_ops tracepoint_verifier_ops = { | |
1209 | .get_func_proto = tp_prog_func_proto, | |
1210 | .is_valid_access = tp_prog_is_valid_access, | |
1211 | }; | |
1212 | ||
1213 | const struct bpf_prog_ops tracepoint_prog_ops = { | |
1214 | }; | |
1215 | ||
1216 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, | |
4bebdc7a YS |
1217 | struct bpf_perf_event_value *, buf, u32, size) |
1218 | { | |
1219 | int err = -EINVAL; | |
1220 | ||
1221 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
1222 | goto clear; | |
1223 | err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, | |
1224 | &buf->running); | |
1225 | if (unlikely(err)) | |
1226 | goto clear; | |
1227 | return 0; | |
1228 | clear: | |
1229 | memset(buf, 0, size); | |
1230 | return err; | |
1231 | } | |
1232 | ||
f005afed YS |
1233 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
1234 | .func = bpf_perf_prog_read_value, | |
4bebdc7a YS |
1235 | .gpl_only = true, |
1236 | .ret_type = RET_INTEGER, | |
1237 | .arg1_type = ARG_PTR_TO_CTX, | |
1238 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1239 | .arg3_type = ARG_CONST_SIZE, | |
1240 | }; | |
1241 | ||
fff7b643 DX |
1242 | BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, |
1243 | void *, buf, u32, size, u64, flags) | |
1244 | { | |
1245 | #ifndef CONFIG_X86 | |
1246 | return -ENOENT; | |
1247 | #else | |
1248 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); | |
1249 | struct perf_branch_stack *br_stack = ctx->data->br_stack; | |
1250 | u32 to_copy; | |
1251 | ||
1252 | if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) | |
1253 | return -EINVAL; | |
1254 | ||
1255 | if (unlikely(!br_stack)) | |
1256 | return -EINVAL; | |
1257 | ||
1258 | if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) | |
1259 | return br_stack->nr * br_entry_size; | |
1260 | ||
1261 | if (!buf || (size % br_entry_size != 0)) | |
1262 | return -EINVAL; | |
1263 | ||
1264 | to_copy = min_t(u32, br_stack->nr * br_entry_size, size); | |
1265 | memcpy(buf, br_stack->entries, to_copy); | |
1266 | ||
1267 | return to_copy; | |
1268 | #endif | |
1269 | } | |
1270 | ||
1271 | static const struct bpf_func_proto bpf_read_branch_records_proto = { | |
1272 | .func = bpf_read_branch_records, | |
1273 | .gpl_only = true, | |
1274 | .ret_type = RET_INTEGER, | |
1275 | .arg1_type = ARG_PTR_TO_CTX, | |
1276 | .arg2_type = ARG_PTR_TO_MEM_OR_NULL, | |
1277 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1278 | .arg4_type = ARG_ANYTHING, | |
1279 | }; | |
1280 | ||
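A sketch of the two-phase pattern the helper above supports: query the total size with BPF_F_GET_BRANCH_RECORDS_SIZE, then copy into a buffer sized as a multiple of sizeof(struct perf_branch_entry). The buffer is kept small here to respect the 512-byte BPF stack limit.

```c
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <linux/bpf_perf_event.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int lbr_sample(struct bpf_perf_event_data *ctx)
{
	struct perf_branch_entry entries[16];
	long total, copied;

	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (total <= 0)
		return 0;

	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
	if (copied > 0)
		bpf_printk("copied %ld of %ld branch bytes", copied, total);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```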
5e43f899 AI |
1281 | static const struct bpf_func_proto * |
1282 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1283 | { |
1284 | switch (func_id) { | |
1285 | case BPF_FUNC_perf_event_output: | |
9940d67c | 1286 | return &bpf_perf_event_output_proto_tp; |
9fd82b61 | 1287 | case BPF_FUNC_get_stackid: |
7b04d6d6 | 1288 | return &bpf_get_stackid_proto_pe; |
c195651e | 1289 | case BPF_FUNC_get_stack: |
7b04d6d6 | 1290 | return &bpf_get_stack_proto_pe; |
4bebdc7a | 1291 | case BPF_FUNC_perf_prog_read_value: |
f005afed | 1292 | return &bpf_perf_prog_read_value_proto; |
fff7b643 DX |
1293 | case BPF_FUNC_read_branch_records: |
1294 | return &bpf_read_branch_records_proto; | |
9fd82b61 | 1295 | default: |
fc611f47 | 1296 | return bpf_tracing_func_proto(func_id, prog); |
9fd82b61 AS |
1297 | } |
1298 | } | |
1299 | ||
c4f6699d AS |
1300 | /* |
1301 | * bpf_raw_tp_regs is separate from the bpf_pt_regs used from skb/xdp, |
1302 | * to avoid a potential recursive-reuse issue when/if tracepoints are added |
9594dc3c MM |
1303 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. |
1304 | * |
1305 | * Since raw tracepoints run regardless of bpf_prog_active, concurrent usage |
1306 | * in normal, irq, and nmi context must be supported. |
c4f6699d | 1307 | */ |
9594dc3c MM |
1308 | struct bpf_raw_tp_regs { |
1309 | struct pt_regs regs[3]; | |
1310 | }; | |
1311 | static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); | |
1312 | static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); | |
1313 | static struct pt_regs *get_bpf_raw_tp_regs(void) | |
1314 | { | |
1315 | struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
1316 | int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); | |
1317 | ||
1318 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { | |
1319 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1320 | return ERR_PTR(-EBUSY); | |
1321 | } | |
1322 | ||
1323 | return &tp_regs->regs[nest_level - 1]; | |
1324 | } | |
1325 | ||
1326 | static void put_bpf_raw_tp_regs(void) | |
1327 | { | |
1328 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1329 | } | |
1330 | ||
c4f6699d AS |
1331 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1332 | struct bpf_map *, map, u64, flags, void *, data, u64, size) | |
1333 | { | |
9594dc3c MM |
1334 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1335 | int ret; | |
1336 | ||
1337 | if (IS_ERR(regs)) | |
1338 | return PTR_ERR(regs); | |
c4f6699d AS |
1339 | |
1340 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1341 | ret = ____bpf_perf_event_output(regs, map, flags, data, size); |
1342 | ||
1343 | put_bpf_raw_tp_regs(); | |
1344 | return ret; | |
c4f6699d AS |
1345 | } |
1346 | ||
1347 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { | |
1348 | .func = bpf_perf_event_output_raw_tp, | |
1349 | .gpl_only = true, | |
1350 | .ret_type = RET_INTEGER, | |
1351 | .arg1_type = ARG_PTR_TO_CTX, | |
1352 | .arg2_type = ARG_CONST_MAP_PTR, | |
1353 | .arg3_type = ARG_ANYTHING, | |
1354 | .arg4_type = ARG_PTR_TO_MEM, | |
1355 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, | |
1356 | }; | |
1357 | ||
a7658e1a | 1358 | extern const struct bpf_func_proto bpf_skb_output_proto; |
d831ee84 | 1359 | extern const struct bpf_func_proto bpf_xdp_output_proto; |
a7658e1a | 1360 | |
c4f6699d AS |
1361 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1362 | struct bpf_map *, map, u64, flags) | |
1363 | { | |
9594dc3c MM |
1364 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1365 | int ret; | |
1366 | ||
1367 | if (IS_ERR(regs)) | |
1368 | return PTR_ERR(regs); | |
c4f6699d AS |
1369 | |
1370 | perf_fetch_caller_regs(regs); | |
1371 | /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ | |
9594dc3c MM |
1372 | ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, |
1373 | flags, 0, 0); | |
1374 | put_bpf_raw_tp_regs(); | |
1375 | return ret; | |
c4f6699d AS |
1376 | } |
1377 | ||
1378 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { | |
1379 | .func = bpf_get_stackid_raw_tp, | |
1380 | .gpl_only = true, | |
1381 | .ret_type = RET_INTEGER, | |
1382 | .arg1_type = ARG_PTR_TO_CTX, | |
1383 | .arg2_type = ARG_CONST_MAP_PTR, | |
1384 | .arg3_type = ARG_ANYTHING, | |
1385 | }; | |
1386 | ||
c195651e YS |
1387 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1388 | void *, buf, u32, size, u64, flags) | |
1389 | { | |
9594dc3c MM |
1390 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1391 | int ret; | |
1392 | ||
1393 | if (IS_ERR(regs)) | |
1394 | return PTR_ERR(regs); | |
c195651e YS |
1395 | |
1396 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1397 | ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, |
1398 | (unsigned long) size, flags, 0); | |
1399 | put_bpf_raw_tp_regs(); | |
1400 | return ret; | |
c195651e YS |
1401 | } |
1402 | ||
1403 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { | |
1404 | .func = bpf_get_stack_raw_tp, | |
1405 | .gpl_only = true, | |
1406 | .ret_type = RET_INTEGER, | |
1407 | .arg1_type = ARG_PTR_TO_CTX, | |
1408 | .arg2_type = ARG_PTR_TO_MEM, | |
1409 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1410 | .arg4_type = ARG_ANYTHING, | |
1411 | }; | |
1412 | ||
5e43f899 AI |
1413 | static const struct bpf_func_proto * |
1414 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
c4f6699d AS |
1415 | { |
1416 | switch (func_id) { | |
1417 | case BPF_FUNC_perf_event_output: | |
1418 | return &bpf_perf_event_output_proto_raw_tp; | |
1419 | case BPF_FUNC_get_stackid: | |
1420 | return &bpf_get_stackid_proto_raw_tp; | |
c195651e YS |
1421 | case BPF_FUNC_get_stack: |
1422 | return &bpf_get_stack_proto_raw_tp; | |
c4f6699d | 1423 | default: |
fc611f47 | 1424 | return bpf_tracing_func_proto(func_id, prog); |
c4f6699d AS |
1425 | } |
1426 | } | |
1427 | ||
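A sketch of a raw tracepoint program served by raw_tp_prog_func_proto() above; its context is just an array of u64 arguments, as raw_tp_prog_is_valid_access() below enforces. The sched_switch argument layout (preempt, prev, next) is tracepoint-specific and not checked by the verifier.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

SEC("raw_tracepoint/sched_switch")
int on_switch(struct bpf_raw_tracepoint_args *ctx)
{
	struct task_struct *next = (struct task_struct *)ctx->args[2];

	bpf_printk("next pid: %d", BPF_CORE_READ(next, pid));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```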
958a3f2d | 1428 | const struct bpf_func_proto * |
f1b9509c AS |
1429 | tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1430 | { | |
1431 | switch (func_id) { | |
1432 | #ifdef CONFIG_NET | |
1433 | case BPF_FUNC_skb_output: | |
1434 | return &bpf_skb_output_proto; | |
d831ee84 EC |
1435 | case BPF_FUNC_xdp_output: |
1436 | return &bpf_xdp_output_proto; | |
af7ec138 YS |
1437 | case BPF_FUNC_skc_to_tcp6_sock: |
1438 | return &bpf_skc_to_tcp6_sock_proto; | |
478cfbdf YS |
1439 | case BPF_FUNC_skc_to_tcp_sock: |
1440 | return &bpf_skc_to_tcp_sock_proto; | |
1441 | case BPF_FUNC_skc_to_tcp_timewait_sock: | |
1442 | return &bpf_skc_to_tcp_timewait_sock_proto; | |
1443 | case BPF_FUNC_skc_to_tcp_request_sock: | |
1444 | return &bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e YS |
1445 | case BPF_FUNC_skc_to_udp6_sock: |
1446 | return &bpf_skc_to_udp6_sock_proto; | |
8e4597c6 MKL |
1447 | case BPF_FUNC_sk_storage_get: |
1448 | return &bpf_sk_storage_get_tracing_proto; | |
1449 | case BPF_FUNC_sk_storage_delete: | |
1450 | return &bpf_sk_storage_delete_tracing_proto; | |
b60da495 FR |
1451 | case BPF_FUNC_sock_from_file: |
1452 | return &bpf_sock_from_file_proto; | |
c5dbb89f FR |
1453 | case BPF_FUNC_get_socket_cookie: |
1454 | return &bpf_get_socket_ptr_cookie_proto; | |
f1b9509c | 1455 | #endif |
492e639f YS |
1456 | case BPF_FUNC_seq_printf: |
1457 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1458 | &bpf_seq_printf_proto : | |
1459 | NULL; | |
1460 | case BPF_FUNC_seq_write: | |
1461 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1462 | &bpf_seq_write_proto : | |
1463 | NULL; | |
eb411377 AM |
1464 | case BPF_FUNC_seq_printf_btf: |
1465 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1466 | &bpf_seq_printf_btf_proto : | |
1467 | NULL; | |
6e22ab9d JO |
1468 | case BPF_FUNC_d_path: |
1469 | return &bpf_d_path_proto; | |
f1b9509c AS |
1470 | default: |
1471 | return raw_tp_prog_func_proto(func_id, prog); | |
1472 | } | |
1473 | } | |
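| /* | 
| * Editor's note: unresolved func_ids cascade through the fallbacks above: | 
| * tracing_prog_func_proto() falls back to raw_tp_prog_func_proto(), which | 
| * in turn falls back to bpf_tracing_func_proto(), so tracing programs see | 
| * the superset of all three helper sets. | 
| */ | 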
1474 | ||
c4f6699d AS |
1475 | static bool raw_tp_prog_is_valid_access(int off, int size, |
1476 | enum bpf_access_type type, | |
5e43f899 | 1477 | const struct bpf_prog *prog, |
c4f6699d AS |
1478 | struct bpf_insn_access_aux *info) |
1479 | { | |
f1b9509c AS |
1480 | if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) |
1481 | return false; | |
1482 | if (type != BPF_READ) | |
1483 | return false; | |
1484 | if (off % size != 0) | |
1485 | return false; | |
1486 | return true; | |
1487 | } | |
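| /* | 
| * Editor's note: the raw tracepoint ctx is just the tracepoint arguments | 
| * laid out as an array of u64, so the checks above amount to "aligned | 
| * read within args[0..MAX_BPF_FUNC_ARGS-1]". Illustrative program-side | 
| * view (a sketch, not part of this file): | 
| * | 
| *	struct bpf_raw_tracepoint_args { __u64 args[0]; }; | 
| *	u64 a1 = ctx->args[1];	- off = 8, size = 8, BPF_READ: accepted | 
| */ | 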
1488 | ||
1489 | static bool tracing_prog_is_valid_access(int off, int size, | |
1490 | enum bpf_access_type type, | |
1491 | const struct bpf_prog *prog, | |
1492 | struct bpf_insn_access_aux *info) | |
1493 | { | |
1494 | if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) | |
c4f6699d AS |
1495 | return false; |
1496 | if (type != BPF_READ) | |
1497 | return false; | |
1498 | if (off % size != 0) | |
1499 | return false; | |
9e15db66 | 1500 | return btf_ctx_access(off, size, type, prog, info); |
c4f6699d AS |
1501 | } |
1502 | ||
3e7c67d9 KS |
1503 | int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, |
1504 | const union bpf_attr *kattr, | |
1505 | union bpf_attr __user *uattr) | |
1506 | { | |
1507 | return -ENOTSUPP; | |
1508 | } | |
1509 | ||
c4f6699d AS |
1510 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { |
1511 | .get_func_proto = raw_tp_prog_func_proto, | |
1512 | .is_valid_access = raw_tp_prog_is_valid_access, | |
1513 | }; | |
1514 | ||
1515 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { | |
ebfb4d40 | 1516 | #ifdef CONFIG_NET |
1b4d60ec | 1517 | .test_run = bpf_prog_test_run_raw_tp, |
ebfb4d40 | 1518 | #endif |
c4f6699d AS |
1519 | }; |
1520 | ||
f1b9509c AS |
1521 | const struct bpf_verifier_ops tracing_verifier_ops = { |
1522 | .get_func_proto = tracing_prog_func_proto, | |
1523 | .is_valid_access = tracing_prog_is_valid_access, | |
1524 | }; | |
1525 | ||
1526 | const struct bpf_prog_ops tracing_prog_ops = { | |
da00d2f1 | 1527 | .test_run = bpf_prog_test_run_tracing, |
f1b9509c AS |
1528 | }; |
1529 | ||
9df1c28b MM |
1530 | static bool raw_tp_writable_prog_is_valid_access(int off, int size, |
1531 | enum bpf_access_type type, | |
1532 | const struct bpf_prog *prog, | |
1533 | struct bpf_insn_access_aux *info) | |
1534 | { | |
1535 | if (off == 0) { | |
1536 | if (size != sizeof(u64) || type != BPF_READ) | |
1537 | return false; | |
1538 | info->reg_type = PTR_TO_TP_BUFFER; | |
1539 | } | |
1540 | return raw_tp_prog_is_valid_access(off, size, type, prog, info); | |
1541 | } | |
1542 | ||
1543 | const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { | |
1544 | .get_func_proto = raw_tp_prog_func_proto, | |
1545 | .is_valid_access = raw_tp_writable_prog_is_valid_access, | |
1546 | }; | |
1547 | ||
1548 | const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { | |
1549 | }; | |
1550 | ||
0515e599 | 1551 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 1552 | const struct bpf_prog *prog, |
23994631 | 1553 | struct bpf_insn_access_aux *info) |
0515e599 | 1554 | { |
95da0cdb | 1555 | const int size_u64 = sizeof(u64); |
31fd8581 | 1556 | |
0515e599 AS |
1557 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
1558 | return false; | |
1559 | if (type != BPF_READ) | |
1560 | return false; | |
bc23105c DB |
1561 | if (off % size != 0) { |
1562 | if (sizeof(unsigned long) != 4) | |
1563 | return false; | |
1564 | if (size != 8) | |
1565 | return false; | |
1566 | if (off % size != 4) | |
1567 | return false; | |
1568 | } | |
31fd8581 | 1569 | |
f96da094 DB |
1570 | switch (off) { |
1571 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | |
95da0cdb TQ |
1572 | bpf_ctx_record_field_size(info, size_u64); |
1573 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
1574 | return false; | |
1575 | break; | |
1576 | case bpf_ctx_range(struct bpf_perf_event_data, addr): | |
1577 | bpf_ctx_record_field_size(info, size_u64); | |
1578 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
23994631 | 1579 | return false; |
f96da094 DB |
1580 | break; |
1581 | default: | |
0515e599 AS |
1582 | if (size != sizeof(long)) |
1583 | return false; | |
1584 | } | |
f96da094 | 1585 | |
0515e599 AS |
1586 | return true; |
1587 | } | |
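| /* | 
| * Editor's note: the off % size != 0 branch above exists for 32-bit | 
| * architectures, where the __u64 fields of struct bpf_perf_event_data | 
| * may start on a 4-byte boundary; an 8-byte read at off % 8 == 4 must | 
| * then still be accepted. | 
| */ | 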
1588 | ||
6b8cc1d1 DB |
1589 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
1590 | const struct bpf_insn *si, | |
0515e599 | 1591 | struct bpf_insn *insn_buf, |
f96da094 | 1592 | struct bpf_prog *prog, u32 *target_size) |
0515e599 AS |
1593 | { |
1594 | struct bpf_insn *insn = insn_buf; | |
1595 | ||
6b8cc1d1 | 1596 | switch (si->off) { |
0515e599 | 1597 | case offsetof(struct bpf_perf_event_data, sample_period): |
f035a515 | 1598 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 1599 | data), si->dst_reg, si->src_reg, |
0515e599 | 1600 | offsetof(struct bpf_perf_event_data_kern, data)); |
6b8cc1d1 | 1601 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
f96da094 DB |
1602 | bpf_target_off(struct perf_sample_data, period, 8, |
1603 | target_size)); | |
0515e599 | 1604 | break; |
95da0cdb TQ |
1605 | case offsetof(struct bpf_perf_event_data, addr): |
1606 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | |
1607 | data), si->dst_reg, si->src_reg, | |
1608 | offsetof(struct bpf_perf_event_data_kern, data)); | |
1609 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | |
1610 | bpf_target_off(struct perf_sample_data, addr, 8, | |
1611 | target_size)); | |
1612 | break; | |
0515e599 | 1613 | default: |
f035a515 | 1614 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 1615 | regs), si->dst_reg, si->src_reg, |
0515e599 | 1616 | offsetof(struct bpf_perf_event_data_kern, regs)); |
6b8cc1d1 DB |
1617 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
1618 | si->off); | |
0515e599 AS |
1619 | break; |
1620 | } | |
1621 | ||
1622 | return insn - insn_buf; | |
1623 | } | |
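| /* | 
| * Editor's sketch of the rewrite above (illustrative, not in the source): | 
| * a program load such as | 
| * | 
| *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data, sample_period)) | 
| * | 
| * is converted into two kernel-side loads: | 
| * | 
| *	r0 = ((struct bpf_perf_event_data_kern *)r1)->data; | 
| *	r0 = ((struct perf_sample_data *)r0)->period; | 
| */ | 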
1624 | ||
7de16e3a | 1625 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
f005afed | 1626 | .get_func_proto = pe_prog_func_proto, |
0515e599 AS |
1627 | .is_valid_access = pe_prog_is_valid_access, |
1628 | .convert_ctx_access = pe_prog_convert_ctx_access, | |
1629 | }; | |
7de16e3a JK |
1630 | |
1631 | const struct bpf_prog_ops perf_event_prog_ops = { | |
1632 | }; | |
e87c6bc3 YS |
1633 | |
1634 | static DEFINE_MUTEX(bpf_event_mutex); | |
1635 | ||
c8c088ba YS |
1636 | #define BPF_TRACE_MAX_PROGS 64 |
1637 | ||
e87c6bc3 YS |
1638 | int perf_event_attach_bpf_prog(struct perf_event *event, |
1639 | struct bpf_prog *prog) | |
1640 | { | |
e672db03 | 1641 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
1642 | struct bpf_prog_array *new_array; |
1643 | int ret = -EEXIST; | |
1644 | ||
9802d865 | 1645 | /* |
b4da3340 MH |
1646 | * Kprobe override only works if the kprobe is on the function entry, | 
1647 | * and only if the target function is on the error-injection opt-in list. | 
9802d865 JB |
1648 | */ |
1649 | if (prog->kprobe_override && | |
b4da3340 | 1650 | (!trace_kprobe_on_func_entry(event->tp_event) || |
9802d865 JB |
1651 | !trace_kprobe_error_injectable(event->tp_event))) |
1652 | return -EINVAL; | |
1653 | ||
e87c6bc3 YS |
1654 | mutex_lock(&bpf_event_mutex); |
1655 | ||
1656 | if (event->prog) | |
07c41a29 | 1657 | goto unlock; |
e87c6bc3 | 1658 | |
e672db03 | 1659 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
c8c088ba YS |
1660 | if (old_array && |
1661 | bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { | |
1662 | ret = -E2BIG; | |
1663 | goto unlock; | |
1664 | } | |
1665 | ||
e87c6bc3 YS |
1666 | ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); |
1667 | if (ret < 0) | |
07c41a29 | 1668 | goto unlock; |
e87c6bc3 YS |
1669 | |
1670 | /* publish the new array in event->tp_event->prog_array and set event->prog */ | 
1671 | event->prog = prog; | |
1672 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
1673 | bpf_prog_array_free(old_array); | |
1674 | ||
07c41a29 | 1675 | unlock: |
e87c6bc3 YS |
1676 | mutex_unlock(&bpf_event_mutex); |
1677 | return ret; | |
1678 | } | |
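| /* | 
| * Editor's note: attach uses the usual RCU copy/publish/free pattern: a | 
| * new prog_array is built from the old one plus the new program under | 
| * bpf_event_mutex, published with rcu_assign_pointer(), and the old | 
| * array is freed only after an RCU grace period. | 
| */ | 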
1679 | ||
1680 | void perf_event_detach_bpf_prog(struct perf_event *event) | |
1681 | { | |
e672db03 | 1682 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
1683 | struct bpf_prog_array *new_array; |
1684 | int ret; | |
1685 | ||
1686 | mutex_lock(&bpf_event_mutex); | |
1687 | ||
1688 | if (!event->prog) | |
07c41a29 | 1689 | goto unlock; |
e87c6bc3 | 1690 | |
e672db03 | 1691 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
e87c6bc3 | 1692 | ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array); |
170a7e3e SY |
1693 | if (ret == -ENOENT) |
1694 | goto unlock; | |
e87c6bc3 YS |
1695 | if (ret < 0) { |
1696 | bpf_prog_array_delete_safe(old_array, event->prog); | |
1697 | } else { | |
1698 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
1699 | bpf_prog_array_free(old_array); | |
1700 | } | |
1701 | ||
1702 | bpf_prog_put(event->prog); | |
1703 | event->prog = NULL; | |
1704 | ||
07c41a29 | 1705 | unlock: |
e87c6bc3 YS |
1706 | mutex_unlock(&bpf_event_mutex); |
1707 | } | |
f371b304 | 1708 | |
f4e2298e | 1709 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
f371b304 YS |
1710 | { |
1711 | struct perf_event_query_bpf __user *uquery = info; | |
1712 | struct perf_event_query_bpf query = {}; | |
e672db03 | 1713 | struct bpf_prog_array *progs; |
3a38bb98 | 1714 | u32 *ids, prog_cnt, ids_len; |
f371b304 YS |
1715 | int ret; |
1716 | ||
031258da | 1717 | if (!perfmon_capable()) |
f371b304 YS |
1718 | return -EPERM; |
1719 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | |
1720 | return -EINVAL; | |
1721 | if (copy_from_user(&query, uquery, sizeof(query))) | |
1722 | return -EFAULT; | |
3a38bb98 YS |
1723 | |
1724 | ids_len = query.ids_len; | |
1725 | if (ids_len > BPF_TRACE_MAX_PROGS) | |
9c481b90 | 1726 | return -E2BIG; |
3a38bb98 YS |
1727 | ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); |
1728 | if (!ids) | |
1729 | return -ENOMEM; | |
1730 | /* | |
1731 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which | |
1732 | * is required when the user only wants to check uquery->prog_cnt. | 
1733 | * There is no need to check for that case here, since it is handled | 
1734 | * gracefully in bpf_prog_array_copy_info. | |
1735 | */ | |
f371b304 YS |
1736 | |
1737 | mutex_lock(&bpf_event_mutex); | |
e672db03 SF |
1738 | progs = bpf_event_rcu_dereference(event->tp_event->prog_array); |
1739 | ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); | |
f371b304 YS |
1740 | mutex_unlock(&bpf_event_mutex); |
1741 | ||
3a38bb98 YS |
1742 | if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || |
1743 | copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) | |
1744 | ret = -EFAULT; | |
1745 | ||
1746 | kfree(ids); | |
f371b304 YS |
1747 | return ret; |
1748 | } | |
c4f6699d AS |
1749 | |
1750 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; | |
1751 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; | |
1752 | ||
a38d1107 | 1753 | struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) |
c4f6699d AS |
1754 | { |
1755 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; | |
1756 | ||
1757 | for (; btp < __stop__bpf_raw_tp; btp++) { | |
1758 | if (!strcmp(btp->tp->name, name)) | |
1759 | return btp; | |
1760 | } | |
a38d1107 MM |
1761 | |
1762 | return bpf_get_raw_tracepoint_module(name); | |
1763 | } | |
1764 | ||
1765 | void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) | |
1766 | { | |
12cc126d | 1767 | struct module *mod; |
a38d1107 | 1768 | |
12cc126d AN |
1769 | preempt_disable(); |
1770 | mod = __module_address((unsigned long)btp); | |
1771 | module_put(mod); | |
1772 | preempt_enable(); | |
c4f6699d AS |
1773 | } |
1774 | ||
1775 | static __always_inline | |
1776 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) | |
1777 | { | |
f03efe49 | 1778 | cant_sleep(); |
c4f6699d | 1779 | rcu_read_lock(); |
c4f6699d | 1780 | (void) BPF_PROG_RUN(prog, args); |
c4f6699d AS |
1781 | rcu_read_unlock(); |
1782 | } | |
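| /* | 
| * Editor's note: tracepoint probes run in a non-sleepable context (hence | 
| * the cant_sleep() assertion), and the program runs under rcu_read_lock() | 
| * so the prog image and the maps it references stay alive for the | 
| * duration of the run. | 
| */ | 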
1783 | ||
1784 | #define UNPACK(...) __VA_ARGS__ | |
1785 | #define REPEAT_1(FN, DL, X, ...) FN(X) | |
1786 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) | |
1787 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) | |
1788 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) | |
1789 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) | |
1790 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) | |
1791 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) | |
1792 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) | |
1793 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) | |
1794 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) | |
1795 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) | |
1796 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) | |
1797 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) | |
1798 | ||
1799 | #define SARG(X) u64 arg##X | |
1800 | #define COPY(X) args[X] = arg##X | |
1801 | ||
1802 | #define __DL_COM (,) | |
1803 | #define __DL_SEM (;) | |
1804 | ||
1805 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 | |
1806 | ||
1807 | #define BPF_TRACE_DEFN_x(x) \ | |
1808 | void bpf_trace_run##x(struct bpf_prog *prog, \ | |
1809 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ | |
1810 | { \ | |
1811 | u64 args[x]; \ | |
1812 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ | |
1813 | __bpf_trace_run(prog, args); \ | |
1814 | } \ | |
1815 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) | |
1816 | BPF_TRACE_DEFN_x(1); | |
1817 | BPF_TRACE_DEFN_x(2); | |
1818 | BPF_TRACE_DEFN_x(3); | |
1819 | BPF_TRACE_DEFN_x(4); | |
1820 | BPF_TRACE_DEFN_x(5); | |
1821 | BPF_TRACE_DEFN_x(6); | |
1822 | BPF_TRACE_DEFN_x(7); | |
1823 | BPF_TRACE_DEFN_x(8); | |
1824 | BPF_TRACE_DEFN_x(9); | |
1825 | BPF_TRACE_DEFN_x(10); | |
1826 | BPF_TRACE_DEFN_x(11); | |
1827 | BPF_TRACE_DEFN_x(12); | |
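| /* | 
| * Editor's sketch: for illustration, BPF_TRACE_DEFN_x(2) above expands | 
| * to roughly | 
| * | 
| *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1) | 
| *	{ | 
| *		u64 args[2]; | 
| *		args[0] = arg0; args[1] = arg1; | 
| *		__bpf_trace_run(prog, args); | 
| *	} | 
| *	EXPORT_SYMBOL_GPL(bpf_trace_run2); | 
| */ | 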
1828 | ||
1829 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1830 | { | |
1831 | struct tracepoint *tp = btp->tp; | |
1832 | ||
1833 | /* | |
1834 | * check that the program doesn't access arguments beyond what's | 
1835 | * available in this tracepoint | |
1836 | */ | |
1837 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) | |
1838 | return -EINVAL; | |
1839 | ||
9df1c28b MM |
1840 | if (prog->aux->max_tp_access > btp->writable_size) |
1841 | return -EINVAL; | |
1842 | ||
c4f6699d AS |
1843 | return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog); |
1844 | } | |
1845 | ||
1846 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1847 | { | |
e16ec340 | 1848 | return __bpf_probe_register(btp, prog); |
c4f6699d AS |
1849 | } |
1850 | ||
1851 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
1852 | { | |
e16ec340 | 1853 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
c4f6699d | 1854 | } |
41bdc4b4 YS |
1855 | |
1856 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | |
1857 | u32 *fd_type, const char **buf, | |
1858 | u64 *probe_offset, u64 *probe_addr) | |
1859 | { | |
1860 | bool is_tracepoint, is_syscall_tp; | |
1861 | struct bpf_prog *prog; | |
1862 | int flags, err = 0; | |
1863 | ||
1864 | prog = event->prog; | |
1865 | if (!prog) | |
1866 | return -ENOENT; | |
1867 | ||
1868 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ | |
1869 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) | |
1870 | return -EOPNOTSUPP; | |
1871 | ||
1872 | *prog_id = prog->aux->id; | |
1873 | flags = event->tp_event->flags; | |
1874 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; | |
1875 | is_syscall_tp = is_syscall_trace_event(event->tp_event); | |
1876 | ||
1877 | if (is_tracepoint || is_syscall_tp) { | |
1878 | *buf = is_tracepoint ? event->tp_event->tp->name | |
1879 | : event->tp_event->name; | |
1880 | *fd_type = BPF_FD_TYPE_TRACEPOINT; | |
1881 | *probe_offset = 0x0; | |
1882 | *probe_addr = 0x0; | |
1883 | } else { | |
1884 | /* kprobe/uprobe */ | |
1885 | err = -EOPNOTSUPP; | |
1886 | #ifdef CONFIG_KPROBE_EVENTS | |
1887 | if (flags & TRACE_EVENT_FL_KPROBE) | |
1888 | err = bpf_get_kprobe_info(event, fd_type, buf, | |
1889 | probe_offset, probe_addr, | |
1890 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1891 | #endif | |
1892 | #ifdef CONFIG_UPROBE_EVENTS | |
1893 | if (flags & TRACE_EVENT_FL_UPROBE) | |
1894 | err = bpf_get_uprobe_info(event, fd_type, buf, | |
1895 | probe_offset, | |
1896 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
1897 | #endif | |
1898 | } | |
1899 | ||
1900 | return err; | |
1901 | } | |
a38d1107 | 1902 | |
9db1ff0a YS |
1903 | static int __init send_signal_irq_work_init(void) |
1904 | { | |
1905 | int cpu; | |
1906 | struct send_signal_irq_work *work; | |
1907 | ||
1908 | for_each_possible_cpu(cpu) { | |
1909 | work = per_cpu_ptr(&send_signal_work, cpu); | |
1910 | init_irq_work(&work->irq_work, do_bpf_send_signal); | |
1911 | } | |
1912 | return 0; | |
1913 | } | |
1914 | ||
1915 | subsys_initcall(send_signal_irq_work_init); | |
1916 | ||
a38d1107 | 1917 | #ifdef CONFIG_MODULES |
390e99cf SF |
1918 | static int bpf_event_notify(struct notifier_block *nb, unsigned long op, |
1919 | void *module) | |
a38d1107 MM |
1920 | { |
1921 | struct bpf_trace_module *btm, *tmp; | |
1922 | struct module *mod = module; | |
0340a6b7 | 1923 | int ret = 0; |
a38d1107 MM |
1924 | |
1925 | if (mod->num_bpf_raw_events == 0 || | |
1926 | (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) | |
0340a6b7 | 1927 | goto out; |
a38d1107 MM |
1928 | |
1929 | mutex_lock(&bpf_module_mutex); | |
1930 | ||
1931 | switch (op) { | |
1932 | case MODULE_STATE_COMING: | |
1933 | btm = kzalloc(sizeof(*btm), GFP_KERNEL); | |
1934 | if (btm) { | |
1935 | btm->module = module; | |
1936 | list_add(&btm->list, &bpf_trace_modules); | |
0340a6b7 PZ |
1937 | } else { |
1938 | ret = -ENOMEM; | |
a38d1107 MM |
1939 | } |
1940 | break; | |
1941 | case MODULE_STATE_GOING: | |
1942 | list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { | |
1943 | if (btm->module == module) { | |
1944 | list_del(&btm->list); | |
1945 | kfree(btm); | |
1946 | break; | |
1947 | } | |
1948 | } | |
1949 | break; | |
1950 | } | |
1951 | ||
1952 | mutex_unlock(&bpf_module_mutex); | |
1953 | ||
0340a6b7 PZ |
1954 | out: |
1955 | return notifier_from_errno(ret); | |
a38d1107 MM |
1956 | } |
1957 | ||
1958 | static struct notifier_block bpf_module_nb = { | |
1959 | .notifier_call = bpf_event_notify, | |
1960 | }; | |
1961 | ||
390e99cf | 1962 | static int __init bpf_event_init(void) |
a38d1107 MM |
1963 | { |
1964 | register_module_notifier(&bpf_module_nb); | |
1965 | return 0; | |
1966 | } | |
1967 | ||
1968 | fs_initcall(bpf_event_init); | |
1969 | #endif /* CONFIG_MODULES */ |