Commit | Line | Data |
---|---|---|
179a0cc4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2541517c | 2 | /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com |
0515e599 | 3 | * Copyright (c) 2016 Facebook |
2541517c AS |
4 | */ |
5 | #include <linux/kernel.h> | |
6 | #include <linux/types.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/bpf.h> | |
4279adb0 | 9 | #include <linux/bpf_verifier.h> |
0515e599 | 10 | #include <linux/bpf_perf_event.h> |
c4d0bfb4 | 11 | #include <linux/btf.h> |
2541517c AS |
12 | #include <linux/filter.h> |
13 | #include <linux/uaccess.h> | |
9c959c86 | 14 | #include <linux/ctype.h> |
9802d865 | 15 | #include <linux/kprobes.h> |
ac5a72ea | 16 | #include <linux/spinlock.h> |
41bdc4b4 | 17 | #include <linux/syscalls.h> |
540adea3 | 18 | #include <linux/error-injection.h> |
c9a0f3b8 | 19 | #include <linux/btf_ids.h> |
6f100640 | 20 | #include <linux/bpf_lsm.h> |
0dcac272 | 21 | #include <linux/fprobe.h> |
ca74823c JO |
22 | #include <linux/bsearch.h> |
23 | #include <linux/sort.h> | |
f3cf4134 RS |
24 | #include <linux/key.h> |
25 | #include <linux/verification.h> | |
6f100640 | 26 | |
8e4597c6 | 27 | #include <net/bpf_sk_storage.h> |
9802d865 | 28 | |
c4d0bfb4 AM |
29 | #include <uapi/linux/bpf.h> |
30 | #include <uapi/linux/btf.h> | |
31 | ||
c7b6f29b NA |
32 | #include <asm/tlb.h> |
33 | ||
9802d865 | 34 | #include "trace_probe.h" |
2541517c AS |
35 | #include "trace.h" |
36 | ||
ac5a72ea AM |
37 | #define CREATE_TRACE_POINTS |
38 | #include "bpf_trace.h" | |
39 | ||
e672db03 SF |
40 | #define bpf_event_rcu_dereference(p) \ |
41 | rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) | |
42 | ||
a38d1107 MM |
43 | #ifdef CONFIG_MODULES |
44 | struct bpf_trace_module { | |
45 | struct module *module; | |
46 | struct list_head list; | |
47 | }; | |
48 | ||
49 | static LIST_HEAD(bpf_trace_modules); | |
50 | static DEFINE_MUTEX(bpf_module_mutex); | |
51 | ||
52 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
53 | { | |
54 | struct bpf_raw_event_map *btp, *ret = NULL; | |
55 | struct bpf_trace_module *btm; | |
56 | unsigned int i; | |
57 | ||
58 | mutex_lock(&bpf_module_mutex); | |
59 | list_for_each_entry(btm, &bpf_trace_modules, list) { | |
60 | for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { | |
61 | btp = &btm->module->bpf_raw_events[i]; | |
62 | if (!strcmp(btp->tp->name, name)) { | |
63 | if (try_module_get(btm->module)) | |
64 | ret = btp; | |
65 | goto out; | |
66 | } | |
67 | } | |
68 | } | |
69 | out: | |
70 | mutex_unlock(&bpf_module_mutex); | |
71 | return ret; | |
72 | } | |
73 | #else | |
74 | static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) | |
75 | { | |
76 | return NULL; | |
77 | } | |
78 | #endif /* CONFIG_MODULES */ | |
79 | ||
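For context, bpf_get_raw_tracepoint_module() is what resolves a raw tracepoint by name at attach time when the tracepoint lives in a module, pinning that module via try_module_get() for the attachment's lifetime. A minimal sketch of the BPF side of such an attachment (assuming libbpf's bpf_helpers.h/bpf_tracing.h and a bpftool-generated vmlinux.h; the in-tree sched_switch target is purely illustrative):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Attaching by section name is what drives the raw-tracepoint lookup
 * above; for tracepoints defined in modules, bpf_trace_modules is
 * searched and a module reference is held while attached. */
SEC("tp_btf/sched_switch")
int BPF_PROG(on_switch, bool preempt, struct task_struct *prev,
             struct task_struct *next)
{
    bpf_printk("switch to %s (pid %d)", next->comm, next->pid);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```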
035226b9 | 80 | u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
c195651e | 81 | u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
035226b9 | 82 | |
eb411377 AM |
83 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, |
84 | u64 flags, const struct btf **btf, | |
85 | s32 *btf_id); | |
f7098690 JO |
86 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); |
87 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); | |
eb411377 | 88 | |
2541517c AS |
89 | /** |
90 | * trace_call_bpf - invoke BPF program | |
e87c6bc3 | 91 | * @call: tracepoint event |
2541517c AS |
92 | * @ctx: opaque context pointer |
93 | * | |
94 | * kprobe handlers execute BPF programs via this helper. | |
95 | * Can be used from static tracepoints in the future. | |
96 | * | |
97 | * Return: BPF programs always return an integer which is interpreted by | |
98 | * kprobe handler as: | |
99 | * 0 - return from kprobe (event is filtered out) | |
100 | * 1 - store kprobe event into ring buffer | |
101 | * Other values are reserved and currently alias to 1 | |
102 | */ | |
e87c6bc3 | 103 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
2541517c AS |
104 | { |
105 | unsigned int ret; | |
106 | ||
b0a81b94 | 107 | cant_sleep(); |
2541517c AS |
108 | |
109 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
110 | /* | |
111 | * since some bpf program is already running on this cpu, | |
112 | * don't call into another bpf program (same or different) | |
113 | * and don't send kprobe event into ring-buffer, | |
114 | * so return zero here | |
115 | */ | |
116 | ret = 0; | |
117 | goto out; | |
118 | } | |
119 | ||
e87c6bc3 YS |
120 | /* |
121 | * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock | |
122 | * to all call sites, we did a bpf_prog_array_valid() there to check | |
123 | * whether call->prog_array is empty or not, which is | |
2b5894cc | 124 | * a heuristic to speed up execution. |
e87c6bc3 YS |
125 | * |
126 | * If bpf_prog_array_valid() fetched prog_array was | |
127 | * non-NULL, we go into trace_call_bpf() and do the actual | |
128 | * proper rcu_dereference() under RCU lock. | |
129 | * If it turns out that prog_array is NULL, we bail out. |
130 | * Conversely, if the bpf_prog_array_valid() fetched pointer |
131 | * was NULL, we skip the prog_array, at the risk of missing |
132 | * events added in between that check and the |
133 | * rcu_dereference(), which is an accepted risk. |
134 | */ | |
055eb955 SF |
135 | rcu_read_lock(); |
136 | ret = bpf_prog_run_array(rcu_dereference(call->prog_array), | |
137 | ctx, bpf_prog_run); | |
138 | rcu_read_unlock(); | |
2541517c AS |
139 | |
140 | out: | |
141 | __this_cpu_dec(bpf_prog_active); | |
2541517c AS |
142 | |
143 | return ret; | |
144 | } | |
2541517c | 145 | |
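A minimal sketch of the return-value convention documented above, as seen from a kprobe program (libbpf headers assumed; the probed function and pid are illustrative). Note also the bpf_prog_active guard: if another BPF program is already running on this CPU, trace_call_bpf() returns 0 and the event is silently dropped.

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_openat2")
int filter_openat(struct pt_regs *ctx)
{
    u32 pid = bpf_get_current_pid_tgid() >> 32;

    /* trace_call_bpf() hands this integer back to the kprobe handler:
     * 0 filters the event out, 1 stores it into the ring buffer. */
    return pid == 1234 ? 1 : 0;
}

char LICENSE[] SEC("license") = "GPL";
```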
9802d865 JB |
146 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
147 | BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) | |
148 | { | |
9802d865 | 149 | regs_set_return_value(regs, rc); |
540adea3 | 150 | override_function_with_return(regs); |
9802d865 JB |
151 | return 0; |
152 | } | |
153 | ||
154 | static const struct bpf_func_proto bpf_override_return_proto = { | |
155 | .func = bpf_override_return, | |
156 | .gpl_only = true, | |
157 | .ret_type = RET_INTEGER, | |
158 | .arg1_type = ARG_PTR_TO_CTX, | |
159 | .arg2_type = ARG_ANYTHING, | |
160 | }; | |
161 | #endif | |
162 | ||
8d92db5c CH |
163 | static __always_inline int |
164 | bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) | |
2541517c | 165 | { |
8d92db5c | 166 | int ret; |
2541517c | 167 | |
c0ee37e8 | 168 | ret = copy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
169 | if (unlikely(ret < 0)) |
170 | memset(dst, 0, size); | |
6ae08ae3 DB |
171 | return ret; |
172 | } | |
173 | ||
8d92db5c CH |
174 | BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, |
175 | const void __user *, unsafe_ptr) | |
176 | { | |
177 | return bpf_probe_read_user_common(dst, size, unsafe_ptr); | |
178 | } | |
179 | ||
f470378c | 180 | const struct bpf_func_proto bpf_probe_read_user_proto = { |
6ae08ae3 DB |
181 | .func = bpf_probe_read_user, |
182 | .gpl_only = true, | |
183 | .ret_type = RET_INTEGER, | |
184 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
185 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
186 | .arg3_type = ARG_ANYTHING, | |
187 | }; | |
188 | ||
8d92db5c CH |
189 | static __always_inline int |
190 | bpf_probe_read_user_str_common(void *dst, u32 size, | |
191 | const void __user *unsafe_ptr) | |
6ae08ae3 | 192 | { |
8d92db5c | 193 | int ret; |
6ae08ae3 | 194 | |
6fa6d280 DX |
195 | /* |
196 | * NB: We rely on strncpy_from_user() not copying junk past the NUL | |
197 | * terminator into `dst`. | |
198 | * | |
199 | * strncpy_from_user() does long-sized strides in the fast path. If the | |
200 | * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, | |
201 | * then there could be junk after the NUL in `dst`. If the user takes |
202 | * `dst` and keys a hash map with it, then semantically identical |
203 | * strings can occupy multiple entries in the map. |
204 | */ | |
8d92db5c | 205 | ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); |
6ae08ae3 DB |
206 | if (unlikely(ret < 0)) |
207 | memset(dst, 0, size); | |
6ae08ae3 DB |
208 | return ret; |
209 | } | |
210 | ||
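The NUL-masking guarantee called out in the comment above is what makes strings read with this helper usable as stable map keys. A hedged sketch (libbpf headers assumed; the map sizing and probe target are illustrative):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 1024);
    __uint(key_size, 64);
    __uint(value_size, sizeof(u64));
} hits SEC(".maps");

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(count_open, int dfd, const char *filename)
{
    char key[64] = {};
    u64 one = 1, *cnt;

    /* Bytes past the NUL stay zeroed, so identical strings always
     * produce byte-identical 64-byte keys. */
    if (bpf_probe_read_user_str(key, sizeof(key), filename) < 0)
        return 0;

    cnt = bpf_map_lookup_elem(&hits, key);
    if (cnt)
        __sync_fetch_and_add(cnt, 1);
    else
        bpf_map_update_elem(&hits, key, &one, BPF_ANY);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```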
8d92db5c CH |
211 | BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, |
212 | const void __user *, unsafe_ptr) | |
213 | { | |
214 | return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); | |
215 | } | |
216 | ||
f470378c | 217 | const struct bpf_func_proto bpf_probe_read_user_str_proto = { |
6ae08ae3 DB |
218 | .func = bpf_probe_read_user_str, |
219 | .gpl_only = true, | |
220 | .ret_type = RET_INTEGER, | |
221 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
222 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
223 | .arg3_type = ARG_ANYTHING, | |
224 | }; | |
225 | ||
226 | static __always_inline int | |
8d92db5c | 227 | bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) |
6ae08ae3 | 228 | { |
ff40e510 | 229 | int ret; |
9d1f8be5 | 230 | |
fe557319 | 231 | ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); |
074f528e | 232 | if (unlikely(ret < 0)) |
ff40e510 | 233 | memset(dst, 0, size); |
6ae08ae3 DB |
234 | return ret; |
235 | } | |
074f528e | 236 | |
6ae08ae3 DB |
237 | BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, |
238 | const void *, unsafe_ptr) | |
239 | { | |
8d92db5c | 240 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
241 | } |
242 | ||
f470378c | 243 | const struct bpf_func_proto bpf_probe_read_kernel_proto = { |
6ae08ae3 DB |
244 | .func = bpf_probe_read_kernel, |
245 | .gpl_only = true, | |
246 | .ret_type = RET_INTEGER, | |
247 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
248 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
249 | .arg3_type = ARG_ANYTHING, | |
250 | }; | |
251 | ||
6ae08ae3 | 252 | static __always_inline int |
8d92db5c | 253 | bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) |
6ae08ae3 | 254 | { |
ff40e510 | 255 | int ret; |
8d92db5c | 256 | |
6ae08ae3 | 257 | /* |
8d92db5c CH |
258 | * The strncpy_from_kernel_nofault() call will likely not fill the |
259 | * entire buffer, but that's okay in this circumstance as we're probing | |
6ae08ae3 DB |
260 | * arbitrary memory anyway similar to bpf_probe_read_*() and might |
261 | * as well probe the stack. Thus, memory is explicitly cleared | |
262 | * only in the error case, so that improper users ignoring the |
263 | * return code altogether don't copy garbage; otherwise the length |
264 | * of the string is returned, which can be used for bpf_perf_event_output() et al. |
265 | */ | |
8d92db5c | 266 | ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); |
6ae08ae3 | 267 | if (unlikely(ret < 0)) |
ff40e510 | 268 | memset(dst, 0, size); |
074f528e | 269 | return ret; |
2541517c AS |
270 | } |
271 | ||
6ae08ae3 DB |
272 | BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, |
273 | const void *, unsafe_ptr) | |
274 | { | |
8d92db5c | 275 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); |
6ae08ae3 DB |
276 | } |
277 | ||
f470378c | 278 | const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { |
6ae08ae3 DB |
279 | .func = bpf_probe_read_kernel_str, |
280 | .gpl_only = true, | |
281 | .ret_type = RET_INTEGER, | |
282 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
283 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
284 | .arg3_type = ARG_ANYTHING, | |
285 | }; | |
286 | ||
8d92db5c CH |
287 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
288 | BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, | |
289 | const void *, unsafe_ptr) | |
290 | { | |
291 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { | |
292 | return bpf_probe_read_user_common(dst, size, | |
293 | (__force void __user *)unsafe_ptr); | |
294 | } | |
295 | return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); | |
296 | } | |
297 | ||
298 | static const struct bpf_func_proto bpf_probe_read_compat_proto = { | |
299 | .func = bpf_probe_read_compat, | |
300 | .gpl_only = true, | |
301 | .ret_type = RET_INTEGER, | |
302 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
303 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
304 | .arg3_type = ARG_ANYTHING, | |
305 | }; | |
306 | ||
6ae08ae3 DB |
307 | BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, |
308 | const void *, unsafe_ptr) | |
309 | { | |
8d92db5c CH |
310 | if ((unsigned long)unsafe_ptr < TASK_SIZE) { |
311 | return bpf_probe_read_user_str_common(dst, size, | |
312 | (__force void __user *)unsafe_ptr); | |
313 | } | |
314 | return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); | |
6ae08ae3 DB |
315 | } |
316 | ||
317 | static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { | |
318 | .func = bpf_probe_read_compat_str, | |
2541517c AS |
319 | .gpl_only = true, |
320 | .ret_type = RET_INTEGER, | |
39f19ebb | 321 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
9c019e2b | 322 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
2541517c AS |
323 | .arg3_type = ARG_ANYTHING, |
324 | }; | |
8d92db5c | 325 | #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ |
2541517c | 326 | |
eb1b6688 | 327 | BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, |
f3694e00 | 328 | u32, size) |
96ae5227 | 329 | { |
96ae5227 SD |
330 | /* |
331 | * Ensure we're in user context which is safe for the helper to | |
332 | * run. This helper has no business in a kthread. | |
333 | * | |
334 | * access_ok() should prevent writing to non-user memory, but in | |
335 | * some situations (nommu, temporary switch, etc) access_ok() does | |
336 | * not provide enough validation, hence the check on KERNEL_DS. | |
c7b6f29b NA |
337 | * |
338 | * nmi_uaccess_okay() ensures the probe is not run in an interim | |
339 | * state, when the task or mm are switched. This is specifically | |
340 | * required to prevent the use of temporary mm. | |
96ae5227 SD |
341 | */ |
342 | ||
343 | if (unlikely(in_interrupt() || | |
344 | current->flags & (PF_KTHREAD | PF_EXITING))) | |
345 | return -EPERM; | |
c7b6f29b NA |
346 | if (unlikely(!nmi_uaccess_okay())) |
347 | return -EPERM; | |
96ae5227 | 348 | |
c0ee37e8 | 349 | return copy_to_user_nofault(unsafe_ptr, src, size); |
96ae5227 SD |
350 | } |
351 | ||
352 | static const struct bpf_func_proto bpf_probe_write_user_proto = { | |
353 | .func = bpf_probe_write_user, | |
354 | .gpl_only = true, | |
355 | .ret_type = RET_INTEGER, | |
356 | .arg1_type = ARG_ANYTHING, | |
216e3cd2 | 357 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
39f19ebb | 358 | .arg3_type = ARG_CONST_SIZE, |
96ae5227 SD |
359 | }; |
360 | ||
361 | static const struct bpf_func_proto *bpf_get_probe_write_proto(void) | |
362 | { | |
2c78ee89 AS |
363 | if (!capable(CAP_SYS_ADMIN)) |
364 | return NULL; | |
365 | ||
96ae5227 SD |
366 | pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", |
367 | current->comm, task_pid_nr(current)); | |
368 | ||
369 | return &bpf_probe_write_user_proto; | |
370 | } | |
371 | ||
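A hedged sketch of a program using this helper; loading it requires CAP_SYS_ADMIN and emits the ratelimited warning above (libbpf headers assumed; rewriting openat2's path argument is purely illustrative and genuinely corrupts the calling process's memory):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(rewrite_path, int dfd, const char *filename)
{
    static const char nul_path[] = "/dev/null";

    /* Writes into the *user* buffer behind filename; fails with
     * -EPERM in kthreads, exiting tasks, or interrupt context. */
    bpf_probe_write_user((void *)filename, nul_path, sizeof(nul_path));
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```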
d9c9e4db FR |
372 | #define MAX_TRACE_PRINTK_VARARGS 3 |
373 | #define BPF_TRACE_PRINTK_SIZE 1024 | |
ac5a72ea | 374 | |
d9c9e4db FR |
375 | BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, |
376 | u64, arg2, u64, arg3) | |
ac5a72ea | 377 | { |
d9c9e4db | 378 | u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; |
78aa1cc9 JO |
379 | struct bpf_bprintf_data data = { |
380 | .get_bin_args = true, | |
e2bb9e01 | 381 | .get_buf = true, |
78aa1cc9 | 382 | }; |
ac5a72ea AM |
383 | int ret; |
384 | ||
78aa1cc9 JO |
385 | ret = bpf_bprintf_prepare(fmt, fmt_size, args, |
386 | MAX_TRACE_PRINTK_VARARGS, &data); | |
d9c9e4db FR |
387 | if (ret < 0) |
388 | return ret; | |
389 | ||
e2bb9e01 | 390 | ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); |
d9c9e4db | 391 | |
e2bb9e01 | 392 | trace_bpf_trace_printk(data.buf); |
ac5a72ea | 393 | |
f19a4050 | 394 | bpf_bprintf_cleanup(&data); |
9c959c86 | 395 | |
d9c9e4db | 396 | return ret; |
9c959c86 AS |
397 | } |
398 | ||
399 | static const struct bpf_func_proto bpf_trace_printk_proto = { | |
400 | .func = bpf_trace_printk, | |
401 | .gpl_only = true, | |
402 | .ret_type = RET_INTEGER, | |
216e3cd2 | 403 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
39f19ebb | 404 | .arg2_type = ARG_CONST_SIZE, |
9c959c86 AS |
405 | }; |
406 | ||
10aceb62 | 407 | static void __set_printk_clr_event(void) |
0756ea3e AS |
408 | { |
409 | /* | |
ac5a72ea AM |
410 | * This program might be calling bpf_trace_printk, |
411 | * so enable the associated bpf_trace/bpf_trace_printk event. | |
412 | * Repeat this each time as it is possible a user has | |
413 | * disabled bpf_trace_printk events. However, by loading a |
414 | * program that calls bpf_trace_printk(), the user has expressed |
415 | * the intent to see such events. |
0756ea3e | 416 | */ |
ac5a72ea AM |
417 | if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) |
418 | pr_warn_ratelimited("could not enable bpf_trace_printk events"); | |
10aceb62 | 419 | } |
0756ea3e | 420 | |
10aceb62 DM |
421 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void) |
422 | { | |
423 | __set_printk_clr_event(); | |
0756ea3e AS |
424 | return &bpf_trace_printk_proto; |
425 | } | |
426 | ||
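A minimal usage sketch (libbpf headers assumed): bpf_printk() expands to bpf_trace_printk() with at most MAX_TRACE_PRINTK_VARARGS (3) u64 arguments, and, as described above, loading the program enables the bpf_trace/bpf_trace_printk event so the output shows up in tracefs.

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_exec(void *ctx)
{
    /* At most three varargs per call; read the output with
     * `cat /sys/kernel/debug/tracing/trace_pipe`. */
    bpf_printk("execve from pid %d", bpf_get_current_pid_tgid() >> 32);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```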
78aa1cc9 | 427 | BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args, |
10aceb62 DM |
428 | u32, data_len) |
429 | { | |
78aa1cc9 JO |
430 | struct bpf_bprintf_data data = { |
431 | .get_bin_args = true, | |
e2bb9e01 | 432 | .get_buf = true, |
78aa1cc9 | 433 | }; |
10aceb62 | 434 | int ret, num_args; |
10aceb62 DM |
435 | |
436 | if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || | |
78aa1cc9 | 437 | (data_len && !args)) |
10aceb62 DM |
438 | return -EINVAL; |
439 | num_args = data_len / 8; | |
440 | ||
78aa1cc9 | 441 | ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); |
10aceb62 DM |
442 | if (ret < 0) |
443 | return ret; | |
444 | ||
e2bb9e01 | 445 | ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); |
10aceb62 | 446 | |
e2bb9e01 | 447 | trace_bpf_trace_printk(data.buf); |
10aceb62 | 448 | |
f19a4050 | 449 | bpf_bprintf_cleanup(&data); |
10aceb62 DM |
450 | |
451 | return ret; | |
452 | } | |
453 | ||
454 | static const struct bpf_func_proto bpf_trace_vprintk_proto = { | |
455 | .func = bpf_trace_vprintk, | |
456 | .gpl_only = true, | |
457 | .ret_type = RET_INTEGER, | |
216e3cd2 | 458 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
10aceb62 | 459 | .arg2_type = ARG_CONST_SIZE, |
216e3cd2 | 460 | .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, |
10aceb62 DM |
461 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, |
462 | }; | |
463 | ||
464 | const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) | |
465 | { | |
466 | __set_printk_clr_event(); | |
467 | return &bpf_trace_vprintk_proto; | |
468 | } | |
469 | ||
492e639f | 470 | BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, |
78aa1cc9 | 471 | const void *, args, u32, data_len) |
492e639f | 472 | { |
78aa1cc9 JO |
473 | struct bpf_bprintf_data data = { |
474 | .get_bin_args = true, | |
475 | }; | |
d9c9e4db | 476 | int err, num_args; |
492e639f | 477 | |
335ff499 | 478 | if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || |
78aa1cc9 | 479 | (data_len && !args)) |
d9c9e4db | 480 | return -EINVAL; |
492e639f YS |
481 | num_args = data_len / 8; |
482 | ||
78aa1cc9 | 483 | err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); |
d9c9e4db FR |
484 | if (err < 0) |
485 | return err; | |
492e639f | 486 | |
78aa1cc9 | 487 | seq_bprintf(m, fmt, data.bin_args); |
48cac3f4 | 488 | |
f19a4050 | 489 | bpf_bprintf_cleanup(&data); |
d9c9e4db FR |
490 | |
491 | return seq_has_overflowed(m) ? -EOVERFLOW : 0; | |
492e639f YS |
492 | } |
493 | ||
9436ef6e | 494 | BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) |
c9a0f3b8 | 495 | |
492e639f YS |
496 | static const struct bpf_func_proto bpf_seq_printf_proto = { |
497 | .func = bpf_seq_printf, | |
498 | .gpl_only = true, | |
499 | .ret_type = RET_INTEGER, | |
500 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 501 | .arg1_btf_id = &btf_seq_file_ids[0], |
216e3cd2 | 502 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 503 | .arg3_type = ARG_CONST_SIZE, |
216e3cd2 | 504 | .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, |
492e639f | 505 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
492e639f YS |
506 | }; |
507 | ||
508 | BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) | |
509 | { | |
510 | return seq_write(m, data, len) ? -EOVERFLOW : 0; | |
511 | } | |
512 | ||
492e639f YS |
513 | static const struct bpf_func_proto bpf_seq_write_proto = { |
514 | .func = bpf_seq_write, | |
515 | .gpl_only = true, | |
516 | .ret_type = RET_INTEGER, | |
517 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 518 | .arg1_btf_id = &btf_seq_file_ids[0], |
216e3cd2 | 519 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 520 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
492e639f YS |
521 | }; |
522 | ||
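These two helpers back BPF iterators: the kernel invokes the program once per object and the program renders into the seq_file. A hedged sketch of a task iterator (assuming libbpf's BPF_SEQ_PRINTF from bpf_tracing.h, which packs its varargs into the u64 array that bpf_bprintf_prepare() unpacks):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_tasks(struct bpf_iter__task *ctx)
{
    struct seq_file *seq = ctx->meta->seq;
    struct task_struct *task = ctx->task;

    if (!task)
        return 0;

    /* Rendered via bpf_seq_printf(); reading a pinned iterator link
     * (e.g. from bpffs) replays this for every task. */
    BPF_SEQ_PRINTF(seq, "%d %s\n", task->pid, task->comm);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```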
eb411377 AM |
523 | BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, |
524 | u32, btf_ptr_size, u64, flags) | |
525 | { | |
526 | const struct btf *btf; | |
527 | s32 btf_id; | |
528 | int ret; | |
529 | ||
530 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
531 | if (ret) | |
532 | return ret; | |
533 | ||
534 | return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); | |
535 | } | |
536 | ||
537 | static const struct bpf_func_proto bpf_seq_printf_btf_proto = { | |
538 | .func = bpf_seq_printf_btf, | |
539 | .gpl_only = true, | |
540 | .ret_type = RET_INTEGER, | |
541 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
542 | .arg1_btf_id = &btf_seq_file_ids[0], | |
216e3cd2 | 543 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
492e639f | 544 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
eb411377 | 545 | .arg4_type = ARG_ANYTHING, |
492e639f YS |
546 | }; |
547 | ||
908432ca YS |
548 | static __always_inline int |
549 | get_map_perf_counter(struct bpf_map *map, u64 flags, | |
550 | u64 *value, u64 *enabled, u64 *running) | |
35578d79 | 551 | { |
35578d79 | 552 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
6816a7ff DB |
553 | unsigned int cpu = smp_processor_id(); |
554 | u64 index = flags & BPF_F_INDEX_MASK; | |
3b1efb19 | 555 | struct bpf_event_entry *ee; |
35578d79 | 556 | |
6816a7ff DB |
557 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
558 | return -EINVAL; | |
559 | if (index == BPF_F_CURRENT_CPU) | |
560 | index = cpu; | |
35578d79 KX |
561 | if (unlikely(index >= array->map.max_entries)) |
562 | return -E2BIG; | |
563 | ||
3b1efb19 | 564 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 565 | if (!ee) |
35578d79 KX |
566 | return -ENOENT; |
567 | ||
908432ca YS |
568 | return perf_event_read_local(ee->event, value, enabled, running); |
569 | } | |
570 | ||
571 | BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) | |
572 | { | |
573 | u64 value = 0; | |
574 | int err; | |
575 | ||
576 | err = get_map_perf_counter(map, flags, &value, NULL, NULL); | |
35578d79 | 577 | /* |
f91840a3 AS |
578 | * this api is ugly since we miss [-22..-2] range of valid |
579 | * counter values, but that's uapi | |
35578d79 | 580 | */ |
f91840a3 AS |
581 | if (err) |
582 | return err; | |
583 | return value; | |
35578d79 KX |
584 | } |
585 | ||
62544ce8 | 586 | static const struct bpf_func_proto bpf_perf_event_read_proto = { |
35578d79 | 587 | .func = bpf_perf_event_read, |
1075ef59 | 588 | .gpl_only = true, |
35578d79 KX |
589 | .ret_type = RET_INTEGER, |
590 | .arg1_type = ARG_CONST_MAP_PTR, | |
591 | .arg2_type = ARG_ANYTHING, | |
592 | }; | |
593 | ||
908432ca YS |
594 | BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, |
595 | struct bpf_perf_event_value *, buf, u32, size) | |
596 | { | |
597 | int err = -EINVAL; | |
598 | ||
599 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
600 | goto clear; | |
601 | err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, | |
602 | &buf->running); | |
603 | if (unlikely(err)) | |
604 | goto clear; | |
605 | return 0; | |
606 | clear: | |
607 | memset(buf, 0, size); | |
608 | return err; | |
609 | } | |
610 | ||
611 | static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |
612 | .func = bpf_perf_event_read_value, | |
613 | .gpl_only = true, | |
614 | .ret_type = RET_INTEGER, | |
615 | .arg1_type = ARG_CONST_MAP_PTR, | |
616 | .arg2_type = ARG_ANYTHING, | |
617 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
618 | .arg4_type = ARG_CONST_SIZE, | |
619 | }; | |
620 | ||
8e7a3920 DB |
621 | static __always_inline u64 |
622 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |
283ca526 | 623 | u64 flags, struct perf_sample_data *sd) |
a43eec30 | 624 | { |
a43eec30 | 625 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
d7931330 | 626 | unsigned int cpu = smp_processor_id(); |
1e33759c | 627 | u64 index = flags & BPF_F_INDEX_MASK; |
3b1efb19 | 628 | struct bpf_event_entry *ee; |
a43eec30 | 629 | struct perf_event *event; |
a43eec30 | 630 | |
1e33759c | 631 | if (index == BPF_F_CURRENT_CPU) |
d7931330 | 632 | index = cpu; |
a43eec30 AS |
633 | if (unlikely(index >= array->map.max_entries)) |
634 | return -E2BIG; | |
635 | ||
3b1efb19 | 636 | ee = READ_ONCE(array->ptrs[index]); |
1ca1cc98 | 637 | if (!ee) |
a43eec30 AS |
638 | return -ENOENT; |
639 | ||
3b1efb19 | 640 | event = ee->event; |
a43eec30 AS |
641 | if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || |
642 | event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) | |
643 | return -EINVAL; | |
644 | ||
d7931330 | 645 | if (unlikely(event->oncpu != cpu)) |
a43eec30 AS |
646 | return -EOPNOTSUPP; |
647 | ||
56201969 | 648 | return perf_event_output(event, sd, regs); |
a43eec30 AS |
649 | } |
650 | ||
9594dc3c MM |
651 | /* |
652 | * Support executing tracepoints in normal, irq, and nmi contexts, each |
653 | * of which may call bpf_perf_event_output |
654 | */ | |
655 | struct bpf_trace_sample_data { | |
656 | struct perf_sample_data sds[3]; | |
657 | }; | |
658 | ||
659 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); | |
660 | static DEFINE_PER_CPU(int, bpf_trace_nest_level); | |
f3694e00 DB |
661 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
662 | u64, flags, void *, data, u64, size) | |
8e7a3920 | 663 | { |
9594dc3c MM |
664 | struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); |
665 | int nest_level = this_cpu_inc_return(bpf_trace_nest_level); | |
8e7a3920 DB |
666 | struct perf_raw_record raw = { |
667 | .frag = { | |
668 | .size = size, | |
669 | .data = data, | |
670 | }, | |
671 | }; | |
9594dc3c MM |
672 | struct perf_sample_data *sd; |
673 | int err; | |
8e7a3920 | 674 | |
9594dc3c MM |
675 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { |
676 | err = -EBUSY; | |
677 | goto out; | |
678 | } | |
679 | ||
680 | sd = &sds->sds[nest_level - 1]; | |
681 | ||
682 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { | |
683 | err = -EINVAL; | |
684 | goto out; | |
685 | } | |
8e7a3920 | 686 | |
283ca526 | 687 | perf_sample_data_init(sd, 0, 0); |
0a9081cf | 688 | perf_sample_save_raw_data(sd, &raw); |
283ca526 | 689 | |
9594dc3c MM |
690 | err = __bpf_perf_event_output(regs, map, flags, sd); |
691 | ||
692 | out: | |
693 | this_cpu_dec(bpf_trace_nest_level); | |
694 | return err; | |
8e7a3920 DB |
695 | } |
696 | ||
a43eec30 AS |
697 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
698 | .func = bpf_perf_event_output, | |
1075ef59 | 699 | .gpl_only = true, |
a43eec30 AS |
700 | .ret_type = RET_INTEGER, |
701 | .arg1_type = ARG_PTR_TO_CTX, | |
702 | .arg2_type = ARG_CONST_MAP_PTR, | |
703 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 704 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
a60dd35d | 705 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
a43eec30 AS |
706 | }; |
707 | ||
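A hedged sketch of the usual pairing with a BPF_MAP_TYPE_PERF_EVENT_ARRAY (libbpf headers assumed; the event layout is illustrative). Passing BPF_F_CURRENT_CPU takes the index == BPF_F_CURRENT_CPU branch above, selecting this CPU's ring:

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
    __uint(key_size, sizeof(u32));
    __uint(value_size, sizeof(u32));
} events SEC(".maps");

struct event {
    u32 pid;
    char comm[16];
};

SEC("kprobe/do_sys_openat2")
int emit_open(struct pt_regs *ctx)
{
    struct event e = {};

    e.pid = bpf_get_current_pid_tgid() >> 32;
    bpf_get_current_comm(e.comm, sizeof(e.comm));

    /* Lands in this CPU's perf ring; userspace drains it with
     * libbpf's perf_buffer API. */
    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                          &e, sizeof(e));
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```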
768fb61f AZ |
708 | static DEFINE_PER_CPU(int, bpf_event_output_nest_level); |
709 | struct bpf_nested_pt_regs { | |
710 | struct pt_regs regs[3]; | |
711 | }; | |
712 | static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); | |
713 | static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); | |
bd570ff9 | 714 | |
555c8a86 DB |
715 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
716 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | |
bd570ff9 | 717 | { |
768fb61f | 718 | int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); |
555c8a86 DB |
719 | struct perf_raw_frag frag = { |
720 | .copy = ctx_copy, | |
721 | .size = ctx_size, | |
722 | .data = ctx, | |
723 | }; | |
724 | struct perf_raw_record raw = { | |
725 | .frag = { | |
183fc153 AM |
726 | { |
727 | .next = ctx_size ? &frag : NULL, | |
728 | }, | |
555c8a86 DB |
729 | .size = meta_size, |
730 | .data = meta, | |
731 | }, | |
732 | }; | |
768fb61f AZ |
733 | struct perf_sample_data *sd; |
734 | struct pt_regs *regs; | |
735 | u64 ret; | |
736 | ||
737 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { | |
738 | ret = -EBUSY; | |
739 | goto out; | |
740 | } | |
741 | sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); | |
742 | regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); | |
bd570ff9 DB |
743 | |
744 | perf_fetch_caller_regs(regs); | |
283ca526 | 745 | perf_sample_data_init(sd, 0, 0); |
0a9081cf | 746 | perf_sample_save_raw_data(sd, &raw); |
bd570ff9 | 747 | |
768fb61f AZ |
748 | ret = __bpf_perf_event_output(regs, map, flags, sd); |
749 | out: | |
750 | this_cpu_dec(bpf_event_output_nest_level); | |
751 | return ret; | |
bd570ff9 DB |
752 | } |
753 | ||
f3694e00 | 754 | BPF_CALL_0(bpf_get_current_task) |
606274c5 AS |
755 | { |
756 | return (long) current; | |
757 | } | |
758 | ||
f470378c | 759 | const struct bpf_func_proto bpf_get_current_task_proto = { |
606274c5 AS |
760 | .func = bpf_get_current_task, |
761 | .gpl_only = true, | |
762 | .ret_type = RET_INTEGER, | |
763 | }; | |
764 | ||
3ca1032a KS |
765 | BPF_CALL_0(bpf_get_current_task_btf) |
766 | { | |
767 | return (unsigned long) current; | |
768 | } | |
769 | ||
a396eda5 | 770 | const struct bpf_func_proto bpf_get_current_task_btf_proto = { |
3ca1032a KS |
771 | .func = bpf_get_current_task_btf, |
772 | .gpl_only = true, | |
3f00c523 | 773 | .ret_type = RET_PTR_TO_BTF_ID_TRUSTED, |
d19ddb47 | 774 | .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], |
3ca1032a KS |
775 | }; |
776 | ||
dd6e10fb DX |
777 | BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) |
778 | { | |
779 | return (unsigned long) task_pt_regs(task); | |
780 | } | |
781 | ||
782 | BTF_ID_LIST(bpf_task_pt_regs_ids) | |
783 | BTF_ID(struct, pt_regs) | |
784 | ||
785 | const struct bpf_func_proto bpf_task_pt_regs_proto = { | |
786 | .func = bpf_task_pt_regs, | |
787 | .gpl_only = true, | |
788 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
d19ddb47 | 789 | .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], |
dd6e10fb DX |
790 | .ret_type = RET_PTR_TO_BTF_ID, |
791 | .ret_btf_id = &bpf_task_pt_regs_ids[0], | |
792 | }; | |
793 | ||
f3694e00 | 794 | BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) |
60d20f91 | 795 | { |
60d20f91 SD |
796 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
797 | struct cgroup *cgrp; | |
60d20f91 | 798 | |
60d20f91 SD |
799 | if (unlikely(idx >= array->map.max_entries)) |
800 | return -E2BIG; | |
801 | ||
802 | cgrp = READ_ONCE(array->ptrs[idx]); | |
803 | if (unlikely(!cgrp)) | |
804 | return -EAGAIN; | |
805 | ||
806 | return task_under_cgroup_hierarchy(current, cgrp); | |
807 | } | |
808 | ||
809 | static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { | |
810 | .func = bpf_current_task_under_cgroup, | |
811 | .gpl_only = false, | |
812 | .ret_type = RET_INTEGER, | |
813 | .arg1_type = ARG_CONST_MAP_PTR, | |
814 | .arg2_type = ARG_ANYTHING, | |
815 | }; | |
816 | ||
8b401f9e YS |
817 | struct send_signal_irq_work { |
818 | struct irq_work irq_work; | |
819 | struct task_struct *task; | |
820 | u32 sig; | |
8482941f | 821 | enum pid_type type; |
8b401f9e YS |
822 | }; |
823 | ||
824 | static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); | |
825 | ||
826 | static void do_bpf_send_signal(struct irq_work *entry) | |
827 | { | |
828 | struct send_signal_irq_work *work; | |
829 | ||
830 | work = container_of(entry, struct send_signal_irq_work, irq_work); | |
8482941f | 831 | group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); |
bdb7fdb0 | 832 | put_task_struct(work->task); |
8b401f9e YS |
833 | } |
834 | ||
8482941f | 835 | static int bpf_send_signal_common(u32 sig, enum pid_type type) |
8b401f9e YS |
836 | { |
837 | struct send_signal_irq_work *work = NULL; | |
838 | ||
839 | /* Similar to bpf_probe_write_user, the task needs to be |
840 | * in a sound condition and kernel memory access must be |
841 | * permitted in order to send a signal to the current |
842 | * task. |
843 | */ | |
844 | if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) | |
845 | return -EPERM; | |
8b401f9e YS |
846 | if (unlikely(!nmi_uaccess_okay())) |
847 | return -EPERM; | |
a3d81bc1 HS |
848 | /* Task should not be pid=1 to avoid kernel panic. */ |
849 | if (unlikely(is_global_init(current))) | |
850 | return -EPERM; | |
8b401f9e | 851 | |
1bc7896e | 852 | if (irqs_disabled()) { |
e1afb702 YS |
853 | /* Do an early check on signal validity. Otherwise, |
854 | * the error is lost in deferred irq_work. | |
855 | */ | |
856 | if (unlikely(!valid_signal(sig))) | |
857 | return -EINVAL; | |
858 | ||
8b401f9e | 859 | work = this_cpu_ptr(&send_signal_work); |
7a9f50a0 | 860 | if (irq_work_is_busy(&work->irq_work)) |
8b401f9e YS |
861 | return -EBUSY; |
862 | ||
863 | /* Add the current task, which is the target of the signal, |
864 | * to the irq_work, since the current task may have changed |
865 | * by the time the queued irq_work executes. |
866 | */ | |
bdb7fdb0 | 867 | work->task = get_task_struct(current); |
8b401f9e | 868 | work->sig = sig; |
8482941f | 869 | work->type = type; |
8b401f9e YS |
870 | irq_work_queue(&work->irq_work); |
871 | return 0; | |
872 | } | |
873 | ||
8482941f YS |
874 | return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); |
875 | } | |
876 | ||
877 | BPF_CALL_1(bpf_send_signal, u32, sig) | |
878 | { | |
879 | return bpf_send_signal_common(sig, PIDTYPE_TGID); | |
8b401f9e YS |
880 | } |
881 | ||
882 | static const struct bpf_func_proto bpf_send_signal_proto = { | |
883 | .func = bpf_send_signal, | |
884 | .gpl_only = false, | |
885 | .ret_type = RET_INTEGER, | |
886 | .arg1_type = ARG_ANYTHING, | |
887 | }; | |
888 | ||
8482941f YS |
889 | BPF_CALL_1(bpf_send_signal_thread, u32, sig) |
890 | { | |
891 | return bpf_send_signal_common(sig, PIDTYPE_PID); | |
892 | } | |
893 | ||
894 | static const struct bpf_func_proto bpf_send_signal_thread_proto = { | |
895 | .func = bpf_send_signal_thread, | |
896 | .gpl_only = false, | |
897 | .ret_type = RET_INTEGER, | |
898 | .arg1_type = ARG_ANYTHING, | |
899 | }; | |
900 | ||
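A hedged usage sketch (libbpf headers assumed; the signal number is x86's SIGUSR1 and the probe target is illustrative). The TGID variant delivers to any thread of the current process, the thread variant only to the current thread; with irqs disabled, delivery detours through the per-cpu irq_work above:

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define SIGUSR1 10 /* value on x86; illustrative */

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(signal_opener)
{
    /* PIDTYPE_TGID delivery; bpf_send_signal_thread() would use
     * PIDTYPE_PID and hit only the calling thread. */
    bpf_send_signal(SIGUSR1);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```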
6e22ab9d JO |
901 | BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) |
902 | { | |
903 | long len; | |
904 | char *p; | |
905 | ||
906 | if (!sz) | |
907 | return 0; | |
908 | ||
909 | p = d_path(path, buf, sz); | |
910 | if (IS_ERR(p)) { | |
911 | len = PTR_ERR(p); | |
912 | } else { | |
913 | len = buf + sz - p; | |
914 | memmove(buf, p, len); | |
915 | } | |
916 | ||
917 | return len; | |
918 | } | |
919 | ||
920 | BTF_SET_START(btf_allowlist_d_path) | |
a8a71796 JO |
921 | #ifdef CONFIG_SECURITY |
922 | BTF_ID(func, security_file_permission) | |
923 | BTF_ID(func, security_inode_getattr) | |
924 | BTF_ID(func, security_file_open) | |
925 | #endif | |
926 | #ifdef CONFIG_SECURITY_PATH | |
927 | BTF_ID(func, security_path_truncate) | |
928 | #endif | |
6e22ab9d JO |
929 | BTF_ID(func, vfs_truncate) |
930 | BTF_ID(func, vfs_fallocate) | |
931 | BTF_ID(func, dentry_open) | |
932 | BTF_ID(func, vfs_getattr) | |
933 | BTF_ID(func, filp_close) | |
934 | BTF_SET_END(btf_allowlist_d_path) | |
935 | ||
936 | static bool bpf_d_path_allowed(const struct bpf_prog *prog) | |
937 | { | |
3d06f34a SL |
938 | if (prog->type == BPF_PROG_TYPE_TRACING && |
939 | prog->expected_attach_type == BPF_TRACE_ITER) | |
940 | return true; | |
941 | ||
6f100640 KS |
942 | if (prog->type == BPF_PROG_TYPE_LSM) |
943 | return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); | |
944 | ||
945 | return btf_id_set_contains(&btf_allowlist_d_path, | |
946 | prog->aux->attach_btf_id); | |
6e22ab9d JO |
947 | } |
948 | ||
9436ef6e | 949 | BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) |
6e22ab9d JO |
950 | |
951 | static const struct bpf_func_proto bpf_d_path_proto = { | |
952 | .func = bpf_d_path, | |
953 | .gpl_only = false, | |
954 | .ret_type = RET_INTEGER, | |
955 | .arg1_type = ARG_PTR_TO_BTF_ID, | |
9436ef6e | 956 | .arg1_btf_id = &bpf_d_path_btf_ids[0], |
6e22ab9d JO |
957 | .arg2_type = ARG_PTR_TO_MEM, |
958 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
6e22ab9d JO |
959 | .allowed = bpf_d_path_allowed, |
960 | }; | |
961 | ||
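Because vfs_truncate is in btf_allowlist_d_path, a tracing program attached there passes bpf_d_path_allowed(). A hedged sketch (libbpf headers assumed; the buffer size is illustrative):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/vfs_truncate")
int BPF_PROG(on_truncate, const struct path *path)
{
    char buf[256];
    long len;

    /* A positive return is the path length (the string is moved to
     * the start of buf); a negative one is the d_path() error. */
    len = bpf_d_path((struct path *)path, buf, sizeof(buf));
    if (len > 0)
        bpf_printk("truncate: %s", buf);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```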
c4d0bfb4 AM |
962 | #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ |
963 | BTF_F_PTR_RAW | BTF_F_ZERO) | |
964 | ||
965 | static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, | |
966 | u64 flags, const struct btf **btf, | |
967 | s32 *btf_id) | |
968 | { | |
969 | const struct btf_type *t; | |
970 | ||
971 | if (unlikely(flags & ~(BTF_F_ALL))) | |
972 | return -EINVAL; | |
973 | ||
974 | if (btf_ptr_size != sizeof(struct btf_ptr)) | |
975 | return -EINVAL; | |
976 | ||
977 | *btf = bpf_get_btf_vmlinux(); | |
978 | ||
979 | if (IS_ERR_OR_NULL(*btf)) | |
abbaa433 | 980 | return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; |
c4d0bfb4 AM |
981 | |
982 | if (ptr->type_id > 0) | |
983 | *btf_id = ptr->type_id; | |
984 | else | |
985 | return -EINVAL; | |
986 | ||
987 | if (*btf_id > 0) | |
988 | t = btf_type_by_id(*btf, *btf_id); | |
989 | if (*btf_id <= 0 || !t) | |
990 | return -ENOENT; | |
991 | ||
992 | return 0; | |
993 | } | |
994 | ||
995 | BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, | |
996 | u32, btf_ptr_size, u64, flags) | |
997 | { | |
998 | const struct btf *btf; | |
999 | s32 btf_id; | |
1000 | int ret; | |
1001 | ||
1002 | ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); | |
1003 | if (ret) | |
1004 | return ret; | |
1005 | ||
1006 | return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, | |
1007 | flags); | |
1008 | } | |
1009 | ||
1010 | const struct bpf_func_proto bpf_snprintf_btf_proto = { | |
1011 | .func = bpf_snprintf_btf, | |
1012 | .gpl_only = false, | |
1013 | .ret_type = RET_INTEGER, | |
1014 | .arg1_type = ARG_PTR_TO_MEM, | |
1015 | .arg2_type = ARG_CONST_SIZE, | |
216e3cd2 | 1016 | .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c4d0bfb4 AM |
1017 | .arg4_type = ARG_CONST_SIZE, |
1018 | .arg5_type = ARG_ANYTHING, | |
1019 | }; | |
1020 | ||
9b99edca JO |
1021 | BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) |
1022 | { | |
1023 | /* This helper call is inlined by verifier. */ | |
f92c1e18 | 1024 | return ((u64 *)ctx)[-2]; |
9b99edca JO |
1025 | } |
1026 | ||
1027 | static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { | |
1028 | .func = bpf_get_func_ip_tracing, | |
1029 | .gpl_only = true, | |
1030 | .ret_type = RET_INTEGER, | |
1031 | .arg1_type = ARG_PTR_TO_CTX, | |
1032 | }; | |
1033 | ||
c09eb2e5 JO |
1034 | #ifdef CONFIG_X86_KERNEL_IBT |
1035 | static unsigned long get_entry_ip(unsigned long fentry_ip) | |
1036 | { | |
1037 | u32 instr; | |
1038 | ||
1039 | /* Be extra safe here in case the entry ip is on a page edge. */ |
1040 | if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) | |
1041 | return fentry_ip; | |
1042 | if (is_endbr(instr)) | |
1043 | fentry_ip -= ENDBR_INSN_SIZE; | |
1044 | return fentry_ip; | |
1045 | } | |
1046 | #else | |
1047 | #define get_entry_ip(fentry_ip) fentry_ip | |
1048 | #endif | |
1049 | ||
9ffd9f3f JO |
1050 | BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs) |
1051 | { | |
1052 | struct kprobe *kp = kprobe_running(); | |
1053 | ||
0e253f7e JO |
1054 | if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) |
1055 | return 0; | |
1056 | ||
1057 | return get_entry_ip((uintptr_t)kp->addr); | |
9ffd9f3f JO |
1058 | } |
1059 | ||
1060 | static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { | |
1061 | .func = bpf_get_func_ip_kprobe, | |
1062 | .gpl_only = true, | |
1063 | .ret_type = RET_INTEGER, | |
1064 | .arg1_type = ARG_PTR_TO_CTX, | |
1065 | }; | |
1066 | ||
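A hedged sketch from the kprobe side (libbpf headers assumed). The helper returns 0 unless the kprobe sits on a function entry (KPROBE_FLAG_ON_FUNC_ENTRY); on IBT kernels, get_entry_ip() strips the leading ENDBR so the result matches the kallsyms symbol address:

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(show_entry_ip)
{
    u64 ip = bpf_get_func_ip(ctx); /* 0 if not on function entry */

    bpf_printk("entry ip: %lx", ip);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```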
42a57120 JO |
1067 | BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) |
1068 | { | |
f7098690 | 1069 | return bpf_kprobe_multi_entry_ip(current->bpf_ctx); |
42a57120 JO |
1070 | } |
1071 | ||
1072 | static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { | |
1073 | .func = bpf_get_func_ip_kprobe_multi, | |
1074 | .gpl_only = false, | |
1075 | .ret_type = RET_INTEGER, | |
1076 | .arg1_type = ARG_PTR_TO_CTX, | |
1077 | }; | |
1078 | ||
ca74823c JO |
1079 | BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) |
1080 | { | |
f7098690 | 1081 | return bpf_kprobe_multi_cookie(current->bpf_ctx); |
ca74823c JO |
1082 | } |
1083 | ||
1084 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { | |
1085 | .func = bpf_get_attach_cookie_kprobe_multi, | |
1086 | .gpl_only = false, | |
1087 | .ret_type = RET_INTEGER, | |
1088 | .arg1_type = ARG_PTR_TO_CTX, | |
1089 | }; | |
1090 | ||
7adfc6c9 AN |
1091 | BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) |
1092 | { | |
1093 | struct bpf_trace_run_ctx *run_ctx; | |
1094 | ||
1095 | run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); | |
1096 | return run_ctx->bpf_cookie; | |
1097 | } | |
1098 | ||
1099 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = { | |
1100 | .func = bpf_get_attach_cookie_trace, | |
1101 | .gpl_only = false, | |
1102 | .ret_type = RET_INTEGER, | |
1103 | .arg1_type = ARG_PTR_TO_CTX, | |
1104 | }; | |
1105 | ||
1106 | BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx) | |
1107 | { | |
1108 | return ctx->event->bpf_cookie; | |
1109 | } | |
1110 | ||
1111 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { | |
1112 | .func = bpf_get_attach_cookie_pe, | |
1113 | .gpl_only = false, | |
1114 | .ret_type = RET_INTEGER, | |
1115 | .arg1_type = ARG_PTR_TO_CTX, | |
1116 | }; | |
1117 | ||
2fcc8241 KFL |
1118 | BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) |
1119 | { | |
1120 | struct bpf_trace_run_ctx *run_ctx; | |
1121 | ||
1122 | run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); | |
1123 | return run_ctx->bpf_cookie; | |
1124 | } | |
1125 | ||
1126 | static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { | |
1127 | .func = bpf_get_attach_cookie_tracing, | |
1128 | .gpl_only = false, | |
1129 | .ret_type = RET_INTEGER, | |
1130 | .arg1_type = ARG_PTR_TO_CTX, | |
1131 | }; | |
1132 | ||
856c02db SL |
1133 | BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) |
1134 | { | |
1135 | #ifndef CONFIG_X86 | |
1136 | return -ENOENT; | |
1137 | #else | |
1138 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); | |
1139 | u32 entry_cnt = size / br_entry_size; | |
1140 | ||
1141 | entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); | |
1142 | ||
1143 | if (unlikely(flags)) | |
1144 | return -EINVAL; | |
1145 | ||
1146 | if (!entry_cnt) | |
1147 | return -ENOENT; | |
1148 | ||
1149 | return entry_cnt * br_entry_size; | |
1150 | #endif | |
1151 | } | |
1152 | ||
1153 | static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { | |
1154 | .func = bpf_get_branch_snapshot, | |
1155 | .gpl_only = true, | |
1156 | .ret_type = RET_INTEGER, | |
1157 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
1158 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
1159 | }; | |
1160 | ||
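A hedged sketch (libbpf headers assumed; 32 entries is an arbitrary scratch size). The return value is in bytes, so dividing by sizeof(struct perf_branch_entry) recovers the entry count; -ENOENT means no snapshot was available (non-x86, or no LBR support):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Global scratch buffer: too large for the 512-byte BPF stack. */
struct perf_branch_entry entries[32];

SEC("fentry/do_sys_openat2")
int grab_branches(void *ctx)
{
    long sz = bpf_get_branch_snapshot(entries, sizeof(entries), 0);

    if (sz > 0)
        bpf_printk("captured %ld branch entries",
                   sz / sizeof(struct perf_branch_entry));
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```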
f92c1e18 JO |
1161 | BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value) |
1162 | { | |
1163 | /* This helper call is inlined by verifier. */ | |
1164 | u64 nr_args = ((u64 *)ctx)[-1]; | |
1165 | ||
1166 | if ((u64) n >= nr_args) | |
1167 | return -EINVAL; | |
1168 | *value = ((u64 *)ctx)[n]; | |
1169 | return 0; | |
1170 | } | |
1171 | ||
1172 | static const struct bpf_func_proto bpf_get_func_arg_proto = { | |
1173 | .func = get_func_arg, | |
1174 | .ret_type = RET_INTEGER, | |
1175 | .arg1_type = ARG_PTR_TO_CTX, | |
1176 | .arg2_type = ARG_ANYTHING, | |
1177 | .arg3_type = ARG_PTR_TO_LONG, | |
1178 | }; | |
1179 | ||
1180 | BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) | |
1181 | { | |
1182 | /* This helper call is inlined by verifier. */ | |
1183 | u64 nr_args = ((u64 *)ctx)[-1]; | |
1184 | ||
1185 | *value = ((u64 *)ctx)[nr_args]; | |
1186 | return 0; | |
1187 | } | |
1188 | ||
1189 | static const struct bpf_func_proto bpf_get_func_ret_proto = { | |
1190 | .func = get_func_ret, | |
1191 | .ret_type = RET_INTEGER, | |
1192 | .arg1_type = ARG_PTR_TO_CTX, | |
1193 | .arg2_type = ARG_PTR_TO_LONG, | |
1194 | }; | |
1195 | ||
1196 | BPF_CALL_1(get_func_arg_cnt, void *, ctx) | |
1197 | { | |
1198 | /* This helper call is inlined by verifier. */ | |
1199 | return ((u64 *)ctx)[-1]; | |
1200 | } | |
1201 | ||
1202 | static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { | |
1203 | .func = get_func_arg_cnt, | |
1204 | .ret_type = RET_INTEGER, | |
1205 | .arg1_type = ARG_PTR_TO_CTX, | |
1206 | }; | |
1207 | ||
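These three helpers expose the hidden trampoline context laid out as a u64 array: arguments at indices 0..n-1, the return value at index n (fexit), the argument count at [-1], and the traced function's ip at [-2] (see bpf_get_func_ip_tracing() above). A hedged sketch (libbpf headers assumed; the probe target is illustrative):

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fexit/do_sys_openat2")
int BPF_PROG(dump_args)
{
    u64 arg0 = 0, ret = 0;

    /* Both calls are inlined by the verifier into ctx[] loads. */
    bpf_get_func_arg(ctx, 0, &arg0);
    bpf_get_func_ret(ctx, &ret);
    bpf_printk("nr_args=%ld arg0=%lx ret=%ld",
               bpf_get_func_arg_cnt(ctx), arg0, (long)ret);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```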
f3cf4134 RS |
1208 | #ifdef CONFIG_KEYS |
1209 | __diag_push(); | |
1210 | __diag_ignore_all("-Wmissing-prototypes", | |
1211 | "kfuncs which will be used in BPF programs"); | |
1212 | ||
1213 | /** | |
1214 | * bpf_lookup_user_key - lookup a key by its serial | |
1215 | * @serial: key handle serial number | |
1216 | * @flags: lookup-specific flags | |
1217 | * | |
1218 | * Search a key with a given *serial* and the provided *flags*. | |
1219 | * If found, increment the reference count of the key by one, and | |
1220 | * return it in the bpf_key structure. | |
1221 | * | |
1222 | * The bpf_key structure must be passed to bpf_key_put() when done | |
1223 | * with it, so that the key reference count is decremented and the | |
1224 | * bpf_key structure is freed. | |
1225 | * | |
1226 | * Permission checks are deferred to the time the key is used by | |
1227 | * one of the available key-specific kfuncs. | |
1228 | * | |
1229 | * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested | |
1230 | * special keyring (e.g. session keyring), if it doesn't yet exist. | |
1231 | * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting | |
1232 | * for the key construction, and to retrieve uninstantiated keys (keys | |
1233 | * without data attached to them). | |
1234 | * | |
1235 | * Return: a bpf_key pointer with a valid key pointer if the key is found, a | |
1236 | * NULL pointer otherwise. | |
1237 | */ | |
400031e0 | 1238 | __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) |
f3cf4134 RS |
1239 | { |
1240 | key_ref_t key_ref; | |
1241 | struct bpf_key *bkey; | |
1242 | ||
1243 | if (flags & ~KEY_LOOKUP_ALL) | |
1244 | return NULL; | |
1245 | ||
1246 | /* | |
1247 | * Permission check is deferred until the key is used, as the | |
1248 | * intent of the caller is unknown here. | |
1249 | */ | |
1250 | key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK); | |
1251 | if (IS_ERR(key_ref)) | |
1252 | return NULL; | |
1253 | ||
1254 | bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); | |
1255 | if (!bkey) { | |
1256 | key_put(key_ref_to_ptr(key_ref)); | |
1257 | return NULL; | |
1258 | } | |
1259 | ||
1260 | bkey->key = key_ref_to_ptr(key_ref); | |
1261 | bkey->has_ref = true; | |
1262 | ||
1263 | return bkey; | |
1264 | } | |
1265 | ||
1266 | /** | |
1267 | * bpf_lookup_system_key - lookup a key by a system-defined ID | |
1268 | * @id: key ID | |
1269 | * | |
1270 | * Obtain a bpf_key structure with a key pointer set to the passed key ID. | |
1271 | * The key pointer is marked as invalid, to prevent bpf_key_put() from | |
1272 | * attempting to decrement the key reference count on that pointer. The key | |
1273 | * pointer set in such a way is currently understood only by |
1274 | * verify_pkcs7_signature(). | |
1275 | * | |
1276 | * Set *id* to one of the values defined in include/linux/verification.h: | |
1277 | * 0 for the primary keyring (immutable keyring of system keys); | |
1278 | * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring | |
1279 | * (where keys can be added only if they are vouched for by existing keys | |
1280 | * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform | |
1281 | * keyring (primarily used by the integrity subsystem to verify a kexec'ed | |
1282 | * kernel image and, possibly, the initramfs signature). |
1283 | * | |
1284 | * Return: a bpf_key pointer with an invalid key pointer set from the | |
1285 | * pre-determined ID on success, a NULL pointer otherwise | |
1286 | */ | |
400031e0 | 1287 | __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id) |
f3cf4134 RS |
1288 | { |
1289 | struct bpf_key *bkey; | |
1290 | ||
1291 | if (system_keyring_id_check(id) < 0) | |
1292 | return NULL; | |
1293 | ||
1294 | bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); | |
1295 | if (!bkey) | |
1296 | return NULL; | |
1297 | ||
1298 | bkey->key = (struct key *)(unsigned long)id; | |
1299 | bkey->has_ref = false; | |
1300 | ||
1301 | return bkey; | |
1302 | } | |
1303 | ||
1304 | /** | |
1305 | * bpf_key_put - decrement key reference count if key is valid and free bpf_key | |
1306 | * @bkey: bpf_key structure | |
1307 | * | |
1308 | * Decrement the reference count of the key inside *bkey*, if the pointer | |
1309 | * is valid, and free *bkey*. | |
1310 | */ | |
400031e0 | 1311 | __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) |
f3cf4134 RS |
1312 | { |
1313 | if (bkey->has_ref) | |
1314 | key_put(bkey->key); | |
1315 | ||
1316 | kfree(bkey); | |
1317 | } | |
1318 | ||
865b0566 RS |
1319 | #ifdef CONFIG_SYSTEM_DATA_VERIFICATION |
1320 | /** | |
1321 | * bpf_verify_pkcs7_signature - verify a PKCS#7 signature | |
1322 | * @data_ptr: data to verify | |
1323 | * @sig_ptr: signature of the data | |
1324 | * @trusted_keyring: keyring with keys trusted for signature verification | |
1325 | * | |
1326 | * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* | |
1327 | * with keys in a keyring referenced by *trusted_keyring*. | |
1328 | * | |
1329 | * Return: 0 on success, a negative value on error. | |
1330 | */ | |
400031e0 | 1331 | __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, |
865b0566 RS |
1332 | struct bpf_dynptr_kern *sig_ptr, |
1333 | struct bpf_key *trusted_keyring) | |
1334 | { | |
1335 | int ret; | |
1336 | ||
1337 | if (trusted_keyring->has_ref) { | |
1338 | /* | |
1339 | * Do the permission check deferred in bpf_lookup_user_key(). | |
1340 | * See bpf_lookup_user_key() for more details. | |
1341 | * | |
1342 | * A call to key_task_permission() here would be redundant, as | |
1343 | * it is already done by keyring_search() called by | |
1344 | * find_asymmetric_key(). | |
1345 | */ | |
1346 | ret = key_validate(trusted_keyring->key); | |
1347 | if (ret < 0) | |
1348 | return ret; | |
1349 | } | |
1350 | ||
1351 | return verify_pkcs7_signature(data_ptr->data, | |
1352 | bpf_dynptr_get_size(data_ptr), | |
1353 | sig_ptr->data, | |
1354 | bpf_dynptr_get_size(sig_ptr), | |
1355 | trusted_keyring->key, | |
1356 | VERIFYING_UNSPECIFIED_SIGNATURE, NULL, | |
1357 | NULL); | |
1358 | } | |
1359 | #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ | |
1360 | ||
f3cf4134 RS |
1361 | __diag_pop(); |
1362 | ||
1363 | BTF_SET8_START(key_sig_kfunc_set) | |
1364 | BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) | |
1365 | BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) | |
1366 | BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) | |
865b0566 RS |
1367 | #ifdef CONFIG_SYSTEM_DATA_VERIFICATION |
1368 | BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) | |
1369 | #endif | |
f3cf4134 RS |
1370 | BTF_SET8_END(key_sig_kfunc_set) |
1371 | ||
1372 | static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { | |
1373 | .owner = THIS_MODULE, | |
1374 | .set = &key_sig_kfunc_set, | |
1375 | }; | |
1376 | ||
1377 | static int __init bpf_key_sig_kfuncs_init(void) | |
1378 | { | |
1379 | return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, | |
1380 | &bpf_key_sig_kfunc_set); | |
1381 | } | |
1382 | ||
1383 | late_initcall(bpf_key_sig_kfuncs_init); | |
1384 | #endif /* CONFIG_KEYS */ | |
1385 | ||
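A hedged sketch of consuming these kfuncs from a sleepable LSM program (required because bpf_lookup_user_key() is KF_SLEEPABLE; the key serial is a placeholder). The KF_ACQUIRE | KF_RET_NULL flags mean the verifier rejects the program unless it NULL-checks the result and releases it with bpf_key_put():

```c
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) __ksym;
extern void bpf_key_put(struct bpf_key *bkey) __ksym;

SEC("lsm.s/bpf")
int BPF_PROG(peek_key, int cmd)
{
    /* 0x12345678 is a hypothetical key serial. */
    struct bpf_key *bkey = bpf_lookup_user_key(0x12345678, 0);

    if (!bkey)
        return 0;

    /* ... e.g. hand bkey to bpf_verify_pkcs7_signature() ... */
    bpf_key_put(bkey);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```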
7adfc6c9 | 1386 | static const struct bpf_func_proto * |
fc611f47 | 1387 | bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
2541517c AS |
1388 | { |
1389 | switch (func_id) { | |
1390 | case BPF_FUNC_map_lookup_elem: | |
1391 | return &bpf_map_lookup_elem_proto; | |
1392 | case BPF_FUNC_map_update_elem: | |
1393 | return &bpf_map_update_elem_proto; | |
1394 | case BPF_FUNC_map_delete_elem: | |
1395 | return &bpf_map_delete_elem_proto; | |
02a8c817 AC |
1396 | case BPF_FUNC_map_push_elem: |
1397 | return &bpf_map_push_elem_proto; | |
1398 | case BPF_FUNC_map_pop_elem: | |
1399 | return &bpf_map_pop_elem_proto; | |
1400 | case BPF_FUNC_map_peek_elem: | |
1401 | return &bpf_map_peek_elem_proto; | |
07343110 FZ |
1402 | case BPF_FUNC_map_lookup_percpu_elem: |
1403 | return &bpf_map_lookup_percpu_elem_proto; | |
d9847d31 AS |
1404 | case BPF_FUNC_ktime_get_ns: |
1405 | return &bpf_ktime_get_ns_proto; | |
71d19214 MŻ |
1406 | case BPF_FUNC_ktime_get_boot_ns: |
1407 | return &bpf_ktime_get_boot_ns_proto; | |
04fd61ab AS |
1408 | case BPF_FUNC_tail_call: |
1409 | return &bpf_tail_call_proto; | |
ffeedafb AS |
1410 | case BPF_FUNC_get_current_pid_tgid: |
1411 | return &bpf_get_current_pid_tgid_proto; | |
606274c5 AS |
1412 | case BPF_FUNC_get_current_task: |
1413 | return &bpf_get_current_task_proto; | |
3ca1032a KS |
1414 | case BPF_FUNC_get_current_task_btf: |
1415 | return &bpf_get_current_task_btf_proto; | |
dd6e10fb DX |
1416 | case BPF_FUNC_task_pt_regs: |
1417 | return &bpf_task_pt_regs_proto; | |
ffeedafb AS |
1418 | case BPF_FUNC_get_current_uid_gid: |
1419 | return &bpf_get_current_uid_gid_proto; | |
1420 | case BPF_FUNC_get_current_comm: | |
1421 | return &bpf_get_current_comm_proto; | |
9c959c86 | 1422 | case BPF_FUNC_trace_printk: |
0756ea3e | 1423 | return bpf_get_trace_printk_proto(); |
ab1973d3 AS |
1424 | case BPF_FUNC_get_smp_processor_id: |
1425 | return &bpf_get_smp_processor_id_proto; | |
2d0e30c3 DB |
1426 | case BPF_FUNC_get_numa_node_id: |
1427 | return &bpf_get_numa_node_id_proto; | |
35578d79 KX |
1428 | case BPF_FUNC_perf_event_read: |
1429 | return &bpf_perf_event_read_proto; | |
60d20f91 SD |
1430 | case BPF_FUNC_current_task_under_cgroup: |
1431 | return &bpf_current_task_under_cgroup_proto; | |
8937bd80 AS |
1432 | case BPF_FUNC_get_prandom_u32: |
1433 | return &bpf_get_prandom_u32_proto; | |
51e1bb9e DB |
1434 | case BPF_FUNC_probe_write_user: |
1435 | return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? | |
1436 | NULL : bpf_get_probe_write_proto(); | |
6ae08ae3 DB |
1437 | case BPF_FUNC_probe_read_user: |
1438 | return &bpf_probe_read_user_proto; | |
1439 | case BPF_FUNC_probe_read_kernel: | |
71330842 | 1440 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1441 | NULL : &bpf_probe_read_kernel_proto; |
6ae08ae3 DB |
1442 | case BPF_FUNC_probe_read_user_str: |
1443 | return &bpf_probe_read_user_str_proto; | |
1444 | case BPF_FUNC_probe_read_kernel_str: | |
71330842 | 1445 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1446 | NULL : &bpf_probe_read_kernel_str_proto; |
0ebeea8c DB |
1447 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
1448 | case BPF_FUNC_probe_read: | |
71330842 | 1449 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1450 | NULL : &bpf_probe_read_compat_proto; |
a5e8c070 | 1451 | case BPF_FUNC_probe_read_str: |
71330842 | 1452 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1453 | NULL : &bpf_probe_read_compat_str_proto; |
0ebeea8c | 1454 | #endif |
34ea38ca | 1455 | #ifdef CONFIG_CGROUPS |
bf6fa2c8 YS |
1456 | case BPF_FUNC_get_current_cgroup_id: |
1457 | return &bpf_get_current_cgroup_id_proto; | |
95b861a7 NK |
1458 | case BPF_FUNC_get_current_ancestor_cgroup_id: |
1459 | return &bpf_get_current_ancestor_cgroup_id_proto; | |
c4bcfb38 YS |
1460 | case BPF_FUNC_cgrp_storage_get: |
1461 | return &bpf_cgrp_storage_get_proto; | |
1462 | case BPF_FUNC_cgrp_storage_delete: | |
1463 | return &bpf_cgrp_storage_delete_proto; | |
34ea38ca | 1464 | #endif |
8b401f9e YS |
1465 | case BPF_FUNC_send_signal: |
1466 | return &bpf_send_signal_proto; | |
8482941f YS |
1467 | case BPF_FUNC_send_signal_thread: |
1468 | return &bpf_send_signal_thread_proto; | |
b80b033b SL |
1469 | case BPF_FUNC_perf_event_read_value: |
1470 | return &bpf_perf_event_read_value_proto; | |
b4490c5c CN |
1471 | case BPF_FUNC_get_ns_current_pid_tgid: |
1472 | return &bpf_get_ns_current_pid_tgid_proto; | |
457f4436 AN |
1473 | case BPF_FUNC_ringbuf_output: |
1474 | return &bpf_ringbuf_output_proto; | |
1475 | case BPF_FUNC_ringbuf_reserve: | |
1476 | return &bpf_ringbuf_reserve_proto; | |
1477 | case BPF_FUNC_ringbuf_submit: | |
1478 | return &bpf_ringbuf_submit_proto; | |
1479 | case BPF_FUNC_ringbuf_discard: | |
1480 | return &bpf_ringbuf_discard_proto; | |
1481 | case BPF_FUNC_ringbuf_query: | |
1482 | return &bpf_ringbuf_query_proto; | |
72e2b2b6 YS |
1483 | case BPF_FUNC_jiffies64: |
1484 | return &bpf_jiffies64_proto; | |
fa28dcb8 SL |
1485 | case BPF_FUNC_get_task_stack: |
1486 | return &bpf_get_task_stack_proto; | |
07be4c4a | 1487 | case BPF_FUNC_copy_from_user: |
01685c5b | 1488 | return &bpf_copy_from_user_proto; |
376040e4 | 1489 | case BPF_FUNC_copy_from_user_task: |
01685c5b | 1490 | return &bpf_copy_from_user_task_proto; |
c4d0bfb4 AM |
1491 | case BPF_FUNC_snprintf_btf: |
1492 | return &bpf_snprintf_btf_proto; | |
b7906b70 | 1493 | case BPF_FUNC_per_cpu_ptr: |
eaa6bcb7 | 1494 | return &bpf_per_cpu_ptr_proto; |
b7906b70 | 1495 | case BPF_FUNC_this_cpu_ptr: |
63d9b80d | 1496 | return &bpf_this_cpu_ptr_proto; |
a10787e6 | 1497 | case BPF_FUNC_task_storage_get: |
4279adb0 MKL |
1498 | if (bpf_prog_check_recur(prog)) |
1499 | return &bpf_task_storage_get_recur_proto; | |
a10787e6 SL |
1500 | return &bpf_task_storage_get_proto; |
1501 | case BPF_FUNC_task_storage_delete: | |
8a7dac37 MKL |
1502 | if (bpf_prog_check_recur(prog)) |
1503 | return &bpf_task_storage_delete_recur_proto; | |
a10787e6 | 1504 | return &bpf_task_storage_delete_proto; |
69c087ba YS |
1505 | case BPF_FUNC_for_each_map_elem: |
1506 | return &bpf_for_each_map_elem_proto; | |
7b15523a FR |
1507 | case BPF_FUNC_snprintf: |
1508 | return &bpf_snprintf_proto; | |
9b99edca JO |
1509 | case BPF_FUNC_get_func_ip: |
1510 | return &bpf_get_func_ip_proto_tracing; | |
856c02db SL |
1511 | case BPF_FUNC_get_branch_snapshot: |
1512 | return &bpf_get_branch_snapshot_proto; | |
7c7e3d31 SL |
1513 | case BPF_FUNC_find_vma: |
1514 | return &bpf_find_vma_proto; | |
10aceb62 DM |
1515 | case BPF_FUNC_trace_vprintk: |
1516 | return bpf_get_trace_vprintk_proto(); | |
9fd82b61 | 1517 | default: |
b00628b1 | 1518 | return bpf_base_func_proto(func_id); |
9fd82b61 AS |
1519 | } |
1520 | } | |
1521 | ||
5e43f899 AI |
1522 | static const struct bpf_func_proto * |
1523 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1524 | { |
1525 | switch (func_id) { | |
a43eec30 AS |
1526 | case BPF_FUNC_perf_event_output: |
1527 | return &bpf_perf_event_output_proto; | |
d5a3b1f6 AS |
1528 | case BPF_FUNC_get_stackid: |
1529 | return &bpf_get_stackid_proto; | |
c195651e YS |
1530 | case BPF_FUNC_get_stack: |
1531 | return &bpf_get_stack_proto; | |
9802d865 JB |
1532 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
1533 | case BPF_FUNC_override_return: | |
1534 | return &bpf_override_return_proto; | |
1535 | #endif | |
9ffd9f3f | 1536 | case BPF_FUNC_get_func_ip: |
42a57120 JO |
1537 | return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? |
1538 | &bpf_get_func_ip_proto_kprobe_multi : | |
1539 | &bpf_get_func_ip_proto_kprobe; | |
7adfc6c9 | 1540 | case BPF_FUNC_get_attach_cookie: |
ca74823c JO |
1541 | return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? |
1542 | &bpf_get_attach_cookie_proto_kmulti : | |
1543 | &bpf_get_attach_cookie_proto_trace; | |
2541517c | 1544 | default: |
fc611f47 | 1545 | return bpf_tracing_func_proto(func_id, prog); |
2541517c AS |
1546 | } |
1547 | } | |
1548 | ||
1549 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ | |
19de99f7 | 1550 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 1551 | const struct bpf_prog *prog, |
23994631 | 1552 | struct bpf_insn_access_aux *info) |
2541517c | 1553 | { |
2541517c AS |
1554 | if (off < 0 || off >= sizeof(struct pt_regs)) |
1555 | return false; | |
2541517c AS |
1556 | if (type != BPF_READ) |
1557 | return false; | |
2541517c AS |
1558 | if (off % size != 0) |
1559 | return false; | |
2d071c64 DB |
1560 | /* |
1561 | * Assertion for 32-bit kernels to make sure the last 8-byte access | 
1562 | * (BPF_DW) to the last 4-byte member is disallowed. | 
1563 | */ | |
1564 | if (off + size > sizeof(struct pt_regs)) | |
1565 | return false; | |
1566 | ||
2541517c AS |
1567 | return true; |
1568 | } | |
1569 | ||
7de16e3a | 1570 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
2541517c AS |
1571 | .get_func_proto = kprobe_prog_func_proto, |
1572 | .is_valid_access = kprobe_prog_is_valid_access, | |
1573 | }; | |
1574 | ||
7de16e3a JK |
1575 | const struct bpf_prog_ops kprobe_prog_ops = { |
1576 | }; | |
1577 | ||
f3694e00 DB |
1578 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
1579 | u64, flags, void *, data, u64, size) | |
9940d67c | 1580 | { |
f3694e00 DB |
1581 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1582 | ||
9940d67c AS |
1583 | /* |
1584 | * r1 points to the perf tracepoint buffer where the first 8 bytes are | 
1585 | * hidden from the bpf program and contain a pointer to 'struct pt_regs'. | 
f3694e00 | 1586 | * Fetch it from there and call the same bpf_perf_event_output() helper inline. | 
9940d67c | 1587 | */ |
f3694e00 | 1588 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
9940d67c AS |
1589 | } |
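| | 
| /* | 
| * Illustrative sketch (not a real struct) of the buffer behind tp_buff: | 
| * the first 8 bytes hold the hidden pt_regs pointer fetched above, which | 
| * is why tp_prog_is_valid_access() below rejects reads at offsets below | 
| * sizeof(void *): | 
| * | 
| *	tp_buff:  [ struct pt_regs * ][ tracepoint record fields ... ] | 
| *	           ^ hidden 8 bytes    ^ off in [sizeof(void *), PERF_MAX_TRACE_SIZE) | 
| */ | 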
1590 | ||
1591 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { | |
1592 | .func = bpf_perf_event_output_tp, | |
1593 | .gpl_only = true, | |
1594 | .ret_type = RET_INTEGER, | |
1595 | .arg1_type = ARG_PTR_TO_CTX, | |
1596 | .arg2_type = ARG_CONST_MAP_PTR, | |
1597 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 1598 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
a60dd35d | 1599 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
9940d67c AS |
1600 | }; |
1601 | ||
f3694e00 DB |
1602 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
1603 | u64, flags) | |
9940d67c | 1604 | { |
f3694e00 | 1605 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
9940d67c | 1606 | |
f3694e00 DB |
1607 | /* |
1608 | * Same comment as in bpf_perf_event_output_tp(), only that this time | |
1609 | * the other helper's function body cannot be inlined due to being | |
1610 | * external, thus we need to call the raw helper function. | 
1611 | */ | |
1612 | return bpf_get_stackid((unsigned long) regs, (unsigned long) map, | |
1613 | flags, 0, 0); | |
9940d67c AS |
1614 | } |
1615 | ||
1616 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { | |
1617 | .func = bpf_get_stackid_tp, | |
1618 | .gpl_only = true, | |
1619 | .ret_type = RET_INTEGER, | |
1620 | .arg1_type = ARG_PTR_TO_CTX, | |
1621 | .arg2_type = ARG_CONST_MAP_PTR, | |
1622 | .arg3_type = ARG_ANYTHING, | |
1623 | }; | |
1624 | ||
c195651e YS |
1625 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
1626 | u64, flags) | |
1627 | { | |
1628 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; | |
1629 | ||
1630 | return bpf_get_stack((unsigned long) regs, (unsigned long) buf, | |
1631 | (unsigned long) size, flags, 0); | |
1632 | } | |
1633 | ||
1634 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { | |
1635 | .func = bpf_get_stack_tp, | |
1636 | .gpl_only = true, | |
1637 | .ret_type = RET_INTEGER, | |
1638 | .arg1_type = ARG_PTR_TO_CTX, | |
1639 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1640 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1641 | .arg4_type = ARG_ANYTHING, | |
1642 | }; | |
1643 | ||
5e43f899 AI |
1644 | static const struct bpf_func_proto * |
1645 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
f005afed YS |
1646 | { |
1647 | switch (func_id) { | |
1648 | case BPF_FUNC_perf_event_output: | |
1649 | return &bpf_perf_event_output_proto_tp; | |
1650 | case BPF_FUNC_get_stackid: | |
1651 | return &bpf_get_stackid_proto_tp; | |
c195651e YS |
1652 | case BPF_FUNC_get_stack: |
1653 | return &bpf_get_stack_proto_tp; | |
7adfc6c9 AN |
1654 | case BPF_FUNC_get_attach_cookie: |
1655 | return &bpf_get_attach_cookie_proto_trace; | |
f005afed | 1656 | default: |
fc611f47 | 1657 | return bpf_tracing_func_proto(func_id, prog); |
f005afed YS |
1658 | } |
1659 | } | |
1660 | ||
1661 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, | |
5e43f899 | 1662 | const struct bpf_prog *prog, |
f005afed YS |
1663 | struct bpf_insn_access_aux *info) |
1664 | { | |
1665 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) | |
1666 | return false; | |
1667 | if (type != BPF_READ) | |
1668 | return false; | |
1669 | if (off % size != 0) | |
1670 | return false; | |
1671 | ||
1672 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); | |
1673 | return true; | |
1674 | } | |
1675 | ||
1676 | const struct bpf_verifier_ops tracepoint_verifier_ops = { | |
1677 | .get_func_proto = tp_prog_func_proto, | |
1678 | .is_valid_access = tp_prog_is_valid_access, | |
1679 | }; | |
1680 | ||
1681 | const struct bpf_prog_ops tracepoint_prog_ops = { | |
1682 | }; | |
1683 | ||
1684 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, | |
4bebdc7a YS |
1685 | struct bpf_perf_event_value *, buf, u32, size) |
1686 | { | |
1687 | int err = -EINVAL; | |
1688 | ||
1689 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) | |
1690 | goto clear; | |
1691 | err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, | |
1692 | &buf->running); | |
1693 | if (unlikely(err)) | |
1694 | goto clear; | |
1695 | return 0; | |
1696 | clear: | |
1697 | memset(buf, 0, size); | |
1698 | return err; | |
1699 | } | |
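| | 
| /* | 
| * BPF-side usage sketch (hypothetical program snippet): the size must | 
| * match sizeof(struct bpf_perf_event_value) exactly, otherwise the | 
| * buffer is zeroed and -EINVAL is returned: | 
| * | 
| *	struct bpf_perf_event_value v; | 
| *	long err = bpf_perf_prog_read_value(ctx, &v, sizeof(v)); | 
| */ | 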
1700 | ||
f005afed YS |
1701 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
1702 | .func = bpf_perf_prog_read_value, | |
4bebdc7a YS |
1703 | .gpl_only = true, |
1704 | .ret_type = RET_INTEGER, | |
1705 | .arg1_type = ARG_PTR_TO_CTX, | |
1706 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, | |
1707 | .arg3_type = ARG_CONST_SIZE, | |
1708 | }; | |
1709 | ||
fff7b643 DX |
1710 | BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, |
1711 | void *, buf, u32, size, u64, flags) | |
1712 | { | |
fff7b643 DX |
1713 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); |
1714 | struct perf_branch_stack *br_stack = ctx->data->br_stack; | |
1715 | u32 to_copy; | |
1716 | ||
1717 | if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) | |
1718 | return -EINVAL; | |
1719 | ||
cce6a2d7 JO |
1720 | if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) |
1721 | return -ENOENT; | |
1722 | ||
fff7b643 | 1723 | if (unlikely(!br_stack)) |
db52f572 | 1724 | return -ENOENT; |
fff7b643 DX |
1725 | |
1726 | if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) | |
1727 | return br_stack->nr * br_entry_size; | |
1728 | ||
1729 | if (!buf || (size % br_entry_size != 0)) | |
1730 | return -EINVAL; | |
1731 | ||
1732 | to_copy = min_t(u32, br_stack->nr * br_entry_size, size); | |
1733 | memcpy(buf, br_stack->entries, to_copy); | |
1734 | ||
1735 | return to_copy; | |
fff7b643 DX |
1736 | } |
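| | 
| /* | 
| * BPF-side usage sketch (hypothetical program snippet): query the size | 
| * first with BPF_F_GET_BRANCH_RECORDS_SIZE and a NULL buffer, then read | 
| * the entries; size must be a multiple of sizeof(struct perf_branch_entry): | 
| * | 
| *	struct perf_branch_entry ents[16]; | 
| *	long sz, copied; | 
| * | 
| *	sz = bpf_read_branch_records(ctx, NULL, 0, | 
| *				     BPF_F_GET_BRANCH_RECORDS_SIZE); | 
| *	copied = bpf_read_branch_records(ctx, ents, sizeof(ents), 0); | 
| */ | 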
1737 | ||
1738 | static const struct bpf_func_proto bpf_read_branch_records_proto = { | |
1739 | .func = bpf_read_branch_records, | |
1740 | .gpl_only = true, | |
1741 | .ret_type = RET_INTEGER, | |
1742 | .arg1_type = ARG_PTR_TO_CTX, | |
1743 | .arg2_type = ARG_PTR_TO_MEM_OR_NULL, | |
1744 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, | |
1745 | .arg4_type = ARG_ANYTHING, | |
1746 | }; | |
1747 | ||
5e43f899 AI |
1748 | static const struct bpf_func_proto * |
1749 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
9fd82b61 AS |
1750 | { |
1751 | switch (func_id) { | |
1752 | case BPF_FUNC_perf_event_output: | |
9940d67c | 1753 | return &bpf_perf_event_output_proto_tp; |
9fd82b61 | 1754 | case BPF_FUNC_get_stackid: |
7b04d6d6 | 1755 | return &bpf_get_stackid_proto_pe; |
c195651e | 1756 | case BPF_FUNC_get_stack: |
7b04d6d6 | 1757 | return &bpf_get_stack_proto_pe; |
4bebdc7a | 1758 | case BPF_FUNC_perf_prog_read_value: |
f005afed | 1759 | return &bpf_perf_prog_read_value_proto; |
fff7b643 DX |
1760 | case BPF_FUNC_read_branch_records: |
1761 | return &bpf_read_branch_records_proto; | |
7adfc6c9 AN |
1762 | case BPF_FUNC_get_attach_cookie: |
1763 | return &bpf_get_attach_cookie_proto_pe; | |
9fd82b61 | 1764 | default: |
fc611f47 | 1765 | return bpf_tracing_func_proto(func_id, prog); |
9fd82b61 AS |
1766 | } |
1767 | } | |
1768 | ||
c4f6699d AS |
1769 | /* |
1770 | * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp | |
1771 | * to avoid potential recursive reuse issue when/if tracepoints are added | |
9594dc3c MM |
1772 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. |
1773 | * | |
1774 | * Since raw tracepoints run despite bpf_prog_active, we need to support | 
1775 | * concurrent usage from normal, irq, and nmi contexts. | 
c4f6699d | 1776 | */ |
9594dc3c MM |
1777 | struct bpf_raw_tp_regs { |
1778 | struct pt_regs regs[3]; | |
1779 | }; | |
1780 | static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); | |
1781 | static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); | |
1782 | static struct pt_regs *get_bpf_raw_tp_regs(void) | |
1783 | { | |
1784 | struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); | |
1785 | int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); | |
1786 | ||
1787 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { | |
1788 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1789 | return ERR_PTR(-EBUSY); | |
1790 | } | |
1791 | ||
1792 | return &tp_regs->regs[nest_level - 1]; | |
1793 | } | |
1794 | ||
1795 | static void put_bpf_raw_tp_regs(void) | |
1796 | { | |
1797 | this_cpu_dec(bpf_raw_tp_nest_level); | |
1798 | } | |
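| | 
| /* | 
| * Usage sketch: the per-CPU nest level hands out a distinct scratch | 
| * pt_regs per context, e.g. when a raw tracepoint in task context is | 
| * interrupted by an irq and then an nmi: | 
| * | 
| *	task context:  get_bpf_raw_tp_regs() -> &tp_regs->regs[0] | 
| *	irq context:   get_bpf_raw_tp_regs() -> &tp_regs->regs[1] | 
| *	nmi context:   get_bpf_raw_tp_regs() -> &tp_regs->regs[2] | 
| * | 
| * A fourth nested user on the same CPU gets ERR_PTR(-EBUSY). | 
| */ | 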
1799 | ||
c4f6699d AS |
1800 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1801 | struct bpf_map *, map, u64, flags, void *, data, u64, size) | |
1802 | { | |
9594dc3c MM |
1803 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1804 | int ret; | |
1805 | ||
1806 | if (IS_ERR(regs)) | |
1807 | return PTR_ERR(regs); | |
c4f6699d AS |
1808 | |
1809 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1810 | ret = ____bpf_perf_event_output(regs, map, flags, data, size); |
1811 | ||
1812 | put_bpf_raw_tp_regs(); | |
1813 | return ret; | |
c4f6699d AS |
1814 | } |
1815 | ||
1816 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { | |
1817 | .func = bpf_perf_event_output_raw_tp, | |
1818 | .gpl_only = true, | |
1819 | .ret_type = RET_INTEGER, | |
1820 | .arg1_type = ARG_PTR_TO_CTX, | |
1821 | .arg2_type = ARG_CONST_MAP_PTR, | |
1822 | .arg3_type = ARG_ANYTHING, | |
216e3cd2 | 1823 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c4f6699d AS |
1824 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
1825 | }; | |
1826 | ||
a7658e1a | 1827 | extern const struct bpf_func_proto bpf_skb_output_proto; |
d831ee84 | 1828 | extern const struct bpf_func_proto bpf_xdp_output_proto; |
d9917302 | 1829 | extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; |
a7658e1a | 1830 | |
c4f6699d AS |
1831 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1832 | struct bpf_map *, map, u64, flags) | |
1833 | { | |
9594dc3c MM |
1834 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1835 | int ret; | |
1836 | ||
1837 | if (IS_ERR(regs)) | |
1838 | return PTR_ERR(regs); | |
c4f6699d AS |
1839 | |
1840 | perf_fetch_caller_regs(regs); | |
1841 | /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ | |
9594dc3c MM |
1842 | ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, |
1843 | flags, 0, 0); | |
1844 | put_bpf_raw_tp_regs(); | |
1845 | return ret; | |
c4f6699d AS |
1846 | } |
1847 | ||
1848 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { | |
1849 | .func = bpf_get_stackid_raw_tp, | |
1850 | .gpl_only = true, | |
1851 | .ret_type = RET_INTEGER, | |
1852 | .arg1_type = ARG_PTR_TO_CTX, | |
1853 | .arg2_type = ARG_CONST_MAP_PTR, | |
1854 | .arg3_type = ARG_ANYTHING, | |
1855 | }; | |
1856 | ||
c195651e YS |
1857 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1858 | void *, buf, u32, size, u64, flags) | |
1859 | { | |
9594dc3c MM |
1860 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1861 | int ret; | |
1862 | ||
1863 | if (IS_ERR(regs)) | |
1864 | return PTR_ERR(regs); | |
c195651e YS |
1865 | |
1866 | perf_fetch_caller_regs(regs); | |
9594dc3c MM |
1867 | ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, |
1868 | (unsigned long) size, flags, 0); | |
1869 | put_bpf_raw_tp_regs(); | |
1870 | return ret; | |
c195651e YS |
1871 | } |
1872 | ||
1873 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { | |
1874 | .func = bpf_get_stack_raw_tp, | |
1875 | .gpl_only = true, | |
1876 | .ret_type = RET_INTEGER, | |
1877 | .arg1_type = ARG_PTR_TO_CTX, | |
216e3cd2 | 1878 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
c195651e YS |
1879 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
1880 | .arg4_type = ARG_ANYTHING, | |
1881 | }; | |
1882 | ||
5e43f899 AI |
1883 | static const struct bpf_func_proto * |
1884 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
c4f6699d AS |
1885 | { |
1886 | switch (func_id) { | |
1887 | case BPF_FUNC_perf_event_output: | |
1888 | return &bpf_perf_event_output_proto_raw_tp; | |
1889 | case BPF_FUNC_get_stackid: | |
1890 | return &bpf_get_stackid_proto_raw_tp; | |
c195651e YS |
1891 | case BPF_FUNC_get_stack: |
1892 | return &bpf_get_stack_proto_raw_tp; | |
c4f6699d | 1893 | default: |
fc611f47 | 1894 | return bpf_tracing_func_proto(func_id, prog); |
c4f6699d AS |
1895 | } |
1896 | } | |
1897 | ||
958a3f2d | 1898 | const struct bpf_func_proto * |
f1b9509c AS |
1899 | tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1900 | { | |
3cee6fb8 MKL |
1901 | const struct bpf_func_proto *fn; |
1902 | ||
f1b9509c AS |
1903 | switch (func_id) { |
1904 | #ifdef CONFIG_NET | |
1905 | case BPF_FUNC_skb_output: | |
1906 | return &bpf_skb_output_proto; | |
d831ee84 EC |
1907 | case BPF_FUNC_xdp_output: |
1908 | return &bpf_xdp_output_proto; | |
af7ec138 YS |
1909 | case BPF_FUNC_skc_to_tcp6_sock: |
1910 | return &bpf_skc_to_tcp6_sock_proto; | |
478cfbdf YS |
1911 | case BPF_FUNC_skc_to_tcp_sock: |
1912 | return &bpf_skc_to_tcp_sock_proto; | |
1913 | case BPF_FUNC_skc_to_tcp_timewait_sock: | |
1914 | return &bpf_skc_to_tcp_timewait_sock_proto; | |
1915 | case BPF_FUNC_skc_to_tcp_request_sock: | |
1916 | return &bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e YS |
1917 | case BPF_FUNC_skc_to_udp6_sock: |
1918 | return &bpf_skc_to_udp6_sock_proto; | |
9eeb3aa3 HC |
1919 | case BPF_FUNC_skc_to_unix_sock: |
1920 | return &bpf_skc_to_unix_sock_proto; | |
3bc253c2 GT |
1921 | case BPF_FUNC_skc_to_mptcp_sock: |
1922 | return &bpf_skc_to_mptcp_sock_proto; | |
8e4597c6 MKL |
1923 | case BPF_FUNC_sk_storage_get: |
1924 | return &bpf_sk_storage_get_tracing_proto; | |
1925 | case BPF_FUNC_sk_storage_delete: | |
1926 | return &bpf_sk_storage_delete_tracing_proto; | |
b60da495 FR |
1927 | case BPF_FUNC_sock_from_file: |
1928 | return &bpf_sock_from_file_proto; | |
c5dbb89f FR |
1929 | case BPF_FUNC_get_socket_cookie: |
1930 | return &bpf_get_socket_ptr_cookie_proto; | |
d9917302 EC |
1931 | case BPF_FUNC_xdp_get_buff_len: |
1932 | return &bpf_xdp_get_buff_len_trace_proto; | |
f1b9509c | 1933 | #endif |
492e639f YS |
1934 | case BPF_FUNC_seq_printf: |
1935 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1936 | &bpf_seq_printf_proto : | |
1937 | NULL; | |
1938 | case BPF_FUNC_seq_write: | |
1939 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1940 | &bpf_seq_write_proto : | |
1941 | NULL; | |
eb411377 AM |
1942 | case BPF_FUNC_seq_printf_btf: |
1943 | return prog->expected_attach_type == BPF_TRACE_ITER ? | |
1944 | &bpf_seq_printf_btf_proto : | |
1945 | NULL; | |
6e22ab9d JO |
1946 | case BPF_FUNC_d_path: |
1947 | return &bpf_d_path_proto; | |
f92c1e18 JO |
1948 | case BPF_FUNC_get_func_arg: |
1949 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; | |
1950 | case BPF_FUNC_get_func_ret: | |
1951 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; | |
1952 | case BPF_FUNC_get_func_arg_cnt: | |
1953 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; | |
2fcc8241 KFL |
1954 | case BPF_FUNC_get_attach_cookie: |
1955 | return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; | |
f1b9509c | 1956 | default: |
3cee6fb8 MKL |
1957 | fn = raw_tp_prog_func_proto(func_id, prog); |
1958 | if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) | |
1959 | fn = bpf_iter_get_func_proto(func_id, prog); | |
1960 | return fn; | |
f1b9509c AS |
1961 | } |
1962 | } | |
1963 | ||
c4f6699d AS |
1964 | static bool raw_tp_prog_is_valid_access(int off, int size, |
1965 | enum bpf_access_type type, | |
5e43f899 | 1966 | const struct bpf_prog *prog, |
c4f6699d AS |
1967 | struct bpf_insn_access_aux *info) |
1968 | { | |
35346ab6 | 1969 | return bpf_tracing_ctx_access(off, size, type); |
f1b9509c AS |
1970 | } |
1971 | ||
1972 | static bool tracing_prog_is_valid_access(int off, int size, | |
1973 | enum bpf_access_type type, | |
1974 | const struct bpf_prog *prog, | |
1975 | struct bpf_insn_access_aux *info) | |
1976 | { | |
35346ab6 | 1977 | return bpf_tracing_btf_ctx_access(off, size, type, prog, info); |
c4f6699d AS |
1978 | } |
1979 | ||
3e7c67d9 KS |
1980 | int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, |
1981 | const union bpf_attr *kattr, | |
1982 | union bpf_attr __user *uattr) | |
1983 | { | |
1984 | return -ENOTSUPP; | |
1985 | } | |
1986 | ||
c4f6699d AS |
1987 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { |
1988 | .get_func_proto = raw_tp_prog_func_proto, | |
1989 | .is_valid_access = raw_tp_prog_is_valid_access, | |
1990 | }; | |
1991 | ||
1992 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { | |
ebfb4d40 | 1993 | #ifdef CONFIG_NET |
1b4d60ec | 1994 | .test_run = bpf_prog_test_run_raw_tp, |
ebfb4d40 | 1995 | #endif |
c4f6699d AS |
1996 | }; |
1997 | ||
f1b9509c AS |
1998 | const struct bpf_verifier_ops tracing_verifier_ops = { |
1999 | .get_func_proto = tracing_prog_func_proto, | |
2000 | .is_valid_access = tracing_prog_is_valid_access, | |
2001 | }; | |
2002 | ||
2003 | const struct bpf_prog_ops tracing_prog_ops = { | |
da00d2f1 | 2004 | .test_run = bpf_prog_test_run_tracing, |
f1b9509c AS |
2005 | }; |
2006 | ||
9df1c28b MM |
2007 | static bool raw_tp_writable_prog_is_valid_access(int off, int size, |
2008 | enum bpf_access_type type, | |
2009 | const struct bpf_prog *prog, | |
2010 | struct bpf_insn_access_aux *info) | |
2011 | { | |
2012 | if (off == 0) { | |
2013 | if (size != sizeof(u64) || type != BPF_READ) | |
2014 | return false; | |
2015 | info->reg_type = PTR_TO_TP_BUFFER; | |
2016 | } | |
2017 | return raw_tp_prog_is_valid_access(off, size, type, prog, info); | |
2018 | } | |
2019 | ||
2020 | const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { | |
2021 | .get_func_proto = raw_tp_prog_func_proto, | |
2022 | .is_valid_access = raw_tp_writable_prog_is_valid_access, | |
2023 | }; | |
2024 | ||
2025 | const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { | |
2026 | }; | |
2027 | ||
0515e599 | 2028 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 2029 | const struct bpf_prog *prog, |
23994631 | 2030 | struct bpf_insn_access_aux *info) |
0515e599 | 2031 | { |
95da0cdb | 2032 | const int size_u64 = sizeof(u64); |
31fd8581 | 2033 | |
0515e599 AS |
2034 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
2035 | return false; | |
2036 | if (type != BPF_READ) | |
2037 | return false; | |
bc23105c DB |
2038 | if (off % size != 0) { |
2039 | if (sizeof(unsigned long) != 4) | |
2040 | return false; | |
2041 | if (size != 8) | |
2042 | return false; | |
2043 | if (off % size != 4) | |
2044 | return false; | |
2045 | } | |
31fd8581 | 2046 | |
f96da094 DB |
2047 | switch (off) { |
2048 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): | |
95da0cdb TQ |
2049 | bpf_ctx_record_field_size(info, size_u64); |
2050 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
2051 | return false; | |
2052 | break; | |
2053 | case bpf_ctx_range(struct bpf_perf_event_data, addr): | |
2054 | bpf_ctx_record_field_size(info, size_u64); | |
2055 | if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) | |
23994631 | 2056 | return false; |
f96da094 DB |
2057 | break; |
2058 | default: | |
0515e599 AS |
2059 | if (size != sizeof(long)) |
2060 | return false; | |
2061 | } | |
f96da094 | 2062 | |
0515e599 AS |
2063 | return true; |
2064 | } | |
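| | 
| /* | 
| * Note on the off % size branch above: only 32-bit kernels | 
| * (sizeof(unsigned long) == 4) may do a misaligned access, and only an | 
| * 8-byte read at a 4-byte remainder, e.g. off = 12, size = 8 is accepted | 
| * while off = 2, size = 8 is rejected. | 
| */ | 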
2065 | ||
6b8cc1d1 DB |
2066 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
2067 | const struct bpf_insn *si, | |
0515e599 | 2068 | struct bpf_insn *insn_buf, |
f96da094 | 2069 | struct bpf_prog *prog, u32 *target_size) |
0515e599 AS |
2070 | { |
2071 | struct bpf_insn *insn = insn_buf; | |
2072 | ||
6b8cc1d1 | 2073 | switch (si->off) { |
0515e599 | 2074 | case offsetof(struct bpf_perf_event_data, sample_period): |
f035a515 | 2075 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 2076 | data), si->dst_reg, si->src_reg, |
0515e599 | 2077 | offsetof(struct bpf_perf_event_data_kern, data)); |
6b8cc1d1 | 2078 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
f96da094 DB |
2079 | bpf_target_off(struct perf_sample_data, period, 8, |
2080 | target_size)); | |
0515e599 | 2081 | break; |
95da0cdb TQ |
2082 | case offsetof(struct bpf_perf_event_data, addr): |
2083 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, | |
2084 | data), si->dst_reg, si->src_reg, | |
2085 | offsetof(struct bpf_perf_event_data_kern, data)); | |
2086 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, | |
2087 | bpf_target_off(struct perf_sample_data, addr, 8, | |
2088 | target_size)); | |
2089 | break; | |
0515e599 | 2090 | default: |
f035a515 | 2091 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
6b8cc1d1 | 2092 | regs), si->dst_reg, si->src_reg, |
0515e599 | 2093 | offsetof(struct bpf_perf_event_data_kern, regs)); |
6b8cc1d1 DB |
2094 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
2095 | si->off); | |
0515e599 AS |
2096 | break; |
2097 | } | |
2098 | ||
2099 | return insn - insn_buf; | |
2100 | } | |
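| | 
| /* | 
| * Sketch of what the sample_period case above emits, with hypothetical | 
| * dst_reg = r0 and src_reg = r1 (the first load is pointer-sized; shown | 
| * here for a 64-bit kernel): two dependent loads chasing ctx->data->period | 
| * through the kernel-side struct: | 
| * | 
| *	r0 = *(u64 *)(r1 + offsetof(struct bpf_perf_event_data_kern, data)); | 
| *	r0 = *(u64 *)(r0 + offsetof(struct perf_sample_data, period)); | 
| */ | 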
2101 | ||
7de16e3a | 2102 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
f005afed | 2103 | .get_func_proto = pe_prog_func_proto, |
0515e599 AS |
2104 | .is_valid_access = pe_prog_is_valid_access, |
2105 | .convert_ctx_access = pe_prog_convert_ctx_access, | |
2106 | }; | |
7de16e3a JK |
2107 | |
2108 | const struct bpf_prog_ops perf_event_prog_ops = { | |
2109 | }; | |
e87c6bc3 YS |
2110 | |
2111 | static DEFINE_MUTEX(bpf_event_mutex); | |
2112 | ||
c8c088ba YS |
2113 | #define BPF_TRACE_MAX_PROGS 64 |
2114 | ||
e87c6bc3 | 2115 | int perf_event_attach_bpf_prog(struct perf_event *event, |
82e6b1ee AN |
2116 | struct bpf_prog *prog, |
2117 | u64 bpf_cookie) | |
e87c6bc3 | 2118 | { |
e672db03 | 2119 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
2120 | struct bpf_prog_array *new_array; |
2121 | int ret = -EEXIST; | |
2122 | ||
9802d865 | 2123 | /* |
b4da3340 MH |
2124 | * Kprobe override only works if they are on the function entry, |
2125 | * and only if they are on the opt-in list. | |
9802d865 JB |
2126 | */ |
2127 | if (prog->kprobe_override && | |
b4da3340 | 2128 | (!trace_kprobe_on_func_entry(event->tp_event) || |
9802d865 JB |
2129 | !trace_kprobe_error_injectable(event->tp_event))) |
2130 | return -EINVAL; | |
2131 | ||
e87c6bc3 YS |
2132 | mutex_lock(&bpf_event_mutex); |
2133 | ||
2134 | if (event->prog) | |
07c41a29 | 2135 | goto unlock; |
e87c6bc3 | 2136 | |
e672db03 | 2137 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
c8c088ba YS |
2138 | if (old_array && |
2139 | bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { | |
2140 | ret = -E2BIG; | |
2141 | goto unlock; | |
2142 | } | |
2143 | ||
82e6b1ee | 2144 | ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); |
e87c6bc3 | 2145 | if (ret < 0) |
07c41a29 | 2146 | goto unlock; |
e87c6bc3 YS |
2147 | |
2148 | /* set the new array to event->tp_event and set event->prog */ | |
2149 | event->prog = prog; | |
82e6b1ee | 2150 | event->bpf_cookie = bpf_cookie; |
e87c6bc3 | 2151 | rcu_assign_pointer(event->tp_event->prog_array, new_array); |
8c7dcb84 | 2152 | bpf_prog_array_free_sleepable(old_array); |
e87c6bc3 | 2153 | |
07c41a29 | 2154 | unlock: |
e87c6bc3 YS |
2155 | mutex_unlock(&bpf_event_mutex); |
2156 | return ret; | |
2157 | } | |
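| | 
| /* | 
| * Note: attach follows a copy-then-publish pattern: bpf_prog_array_copy() | 
| * builds a new array with the program appended, rcu_assign_pointer() | 
| * publishes it, and the old array is freed only once readers (including | 
| * sleepable programs) are done, via bpf_prog_array_free_sleepable(). | 
| */ | 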
2158 | ||
2159 | void perf_event_detach_bpf_prog(struct perf_event *event) | |
2160 | { | |
e672db03 | 2161 | struct bpf_prog_array *old_array; |
e87c6bc3 YS |
2162 | struct bpf_prog_array *new_array; |
2163 | int ret; | |
2164 | ||
2165 | mutex_lock(&bpf_event_mutex); | |
2166 | ||
2167 | if (!event->prog) | |
07c41a29 | 2168 | goto unlock; |
e87c6bc3 | 2169 | |
e672db03 | 2170 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
82e6b1ee | 2171 | ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); |
170a7e3e SY |
2172 | if (ret == -ENOENT) |
2173 | goto unlock; | |
e87c6bc3 YS |
2174 | if (ret < 0) { |
2175 | bpf_prog_array_delete_safe(old_array, event->prog); | |
2176 | } else { | |
2177 | rcu_assign_pointer(event->tp_event->prog_array, new_array); | |
8c7dcb84 | 2178 | bpf_prog_array_free_sleepable(old_array); |
e87c6bc3 YS |
2179 | } |
2180 | ||
2181 | bpf_prog_put(event->prog); | |
2182 | event->prog = NULL; | |
2183 | ||
07c41a29 | 2184 | unlock: |
e87c6bc3 YS |
2185 | mutex_unlock(&bpf_event_mutex); |
2186 | } | |
f371b304 | 2187 | |
f4e2298e | 2188 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
f371b304 YS |
2189 | { |
2190 | struct perf_event_query_bpf __user *uquery = info; | |
2191 | struct perf_event_query_bpf query = {}; | |
e672db03 | 2192 | struct bpf_prog_array *progs; |
3a38bb98 | 2193 | u32 *ids, prog_cnt, ids_len; |
f371b304 YS |
2194 | int ret; |
2195 | ||
031258da | 2196 | if (!perfmon_capable()) |
f371b304 YS |
2197 | return -EPERM; |
2198 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | |
2199 | return -EINVAL; | |
2200 | if (copy_from_user(&query, uquery, sizeof(query))) | |
2201 | return -EFAULT; | |
3a38bb98 YS |
2202 | |
2203 | ids_len = query.ids_len; | |
2204 | if (ids_len > BPF_TRACE_MAX_PROGS) | |
9c481b90 | 2205 | return -E2BIG; |
3a38bb98 YS |
2206 | ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); |
2207 | if (!ids) | |
2208 | return -ENOMEM; | |
2209 | /* | |
2210 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which | |
2211 | * is required when the user only wants to check for uquery->prog_cnt. | 
2212 | * There is no need to check for it since the case is handled | |
2213 | * gracefully in bpf_prog_array_copy_info. | |
2214 | */ | |
f371b304 YS |
2215 | |
2216 | mutex_lock(&bpf_event_mutex); | |
e672db03 SF |
2217 | progs = bpf_event_rcu_dereference(event->tp_event->prog_array); |
2218 | ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); | |
f371b304 YS |
2219 | mutex_unlock(&bpf_event_mutex); |
2220 | ||
3a38bb98 YS |
2221 | if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || |
2222 | copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) | |
2223 | ret = -EFAULT; | |
2224 | ||
2225 | kfree(ids); | |
f371b304 YS |
2226 | return ret; |
2227 | } | |
c4f6699d AS |
2228 | |
2229 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; | |
2230 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; | |
2231 | ||
a38d1107 | 2232 | struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) |
c4f6699d AS |
2233 | { |
2234 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; | |
2235 | ||
2236 | for (; btp < __stop__bpf_raw_tp; btp++) { | |
2237 | if (!strcmp(btp->tp->name, name)) | |
2238 | return btp; | |
2239 | } | |
a38d1107 MM |
2240 | |
2241 | return bpf_get_raw_tracepoint_module(name); | |
2242 | } | |
2243 | ||
2244 | void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) | |
2245 | { | |
12cc126d | 2246 | struct module *mod; |
a38d1107 | 2247 | |
12cc126d AN |
2248 | preempt_disable(); |
2249 | mod = __module_address((unsigned long)btp); | |
2250 | module_put(mod); | |
2251 | preempt_enable(); | |
c4f6699d AS |
2252 | } |
2253 | ||
2254 | static __always_inline | |
2255 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) | |
2256 | { | |
f03efe49 | 2257 | cant_sleep(); |
05b24ff9 JO |
2258 | if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { |
2259 | bpf_prog_inc_misses_counter(prog); | |
2260 | goto out; | |
2261 | } | |
c4f6699d | 2262 | rcu_read_lock(); |
fb7dd8bc | 2263 | (void) bpf_prog_run(prog, args); |
c4f6699d | 2264 | rcu_read_unlock(); |
05b24ff9 JO |
2265 | out: |
2266 | this_cpu_dec(*(prog->active)); | |
c4f6699d AS |
2267 | } |
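| | 
| /* | 
| * Note: prog->active above acts as a per-CPU recursion guard: if the same | 
| * program is already running on this CPU (e.g. a tracepoint fired inside | 
| * a helper it called), the nested run is skipped and accounted via | 
| * bpf_prog_inc_misses_counter(). | 
| */ | 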
2268 | ||
2269 | #define UNPACK(...) __VA_ARGS__ | |
2270 | #define REPEAT_1(FN, DL, X, ...) FN(X) | |
2271 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) | |
2272 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) | |
2273 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) | |
2274 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) | |
2275 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) | |
2276 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) | |
2277 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) | |
2278 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) | |
2279 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) | |
2280 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) | |
2281 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) | |
2282 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) | |
2283 | ||
2284 | #define SARG(X) u64 arg##X | |
2285 | #define COPY(X) args[X] = arg##X | |
2286 | ||
2287 | #define __DL_COM (,) | |
2288 | #define __DL_SEM (;) | |
2289 | ||
2290 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 | |
2291 | ||
2292 | #define BPF_TRACE_DEFN_x(x) \ | |
2293 | void bpf_trace_run##x(struct bpf_prog *prog, \ | |
2294 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ | |
2295 | { \ | |
2296 | u64 args[x]; \ | |
2297 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ | |
2298 | __bpf_trace_run(prog, args); \ | |
2299 | } \ | |
2300 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) | |
2301 | BPF_TRACE_DEFN_x(1); | |
2302 | BPF_TRACE_DEFN_x(2); | |
2303 | BPF_TRACE_DEFN_x(3); | |
2304 | BPF_TRACE_DEFN_x(4); | |
2305 | BPF_TRACE_DEFN_x(5); | |
2306 | BPF_TRACE_DEFN_x(6); | |
2307 | BPF_TRACE_DEFN_x(7); | |
2308 | BPF_TRACE_DEFN_x(8); | |
2309 | BPF_TRACE_DEFN_x(9); | |
2310 | BPF_TRACE_DEFN_x(10); | |
2311 | BPF_TRACE_DEFN_x(11); | |
2312 | BPF_TRACE_DEFN_x(12); | |
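| | 
| /* | 
| * For reference, BPF_TRACE_DEFN_x(2) above expands (modulo whitespace) to: | 
| * | 
| *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1) | 
| *	{ | 
| *		u64 args[2]; | 
| *		args[0] = arg0; args[1] = arg1; | 
| *		__bpf_trace_run(prog, args); | 
| *	} | 
| *	EXPORT_SYMBOL_GPL(bpf_trace_run2); | 
| */ | 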
2313 | ||
2314 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2315 | { | |
2316 | struct tracepoint *tp = btp->tp; | |
2317 | ||
2318 | /* | |
2319 | * check that the program doesn't access arguments beyond what's | 
2320 | * available in this tracepoint | 
2321 | */ | |
2322 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) | |
2323 | return -EINVAL; | |
2324 | ||
9df1c28b MM |
2325 | if (prog->aux->max_tp_access > btp->writable_size) |
2326 | return -EINVAL; | |
2327 | ||
9913d574 SRV |
2328 | return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, |
2329 | prog); | |
c4f6699d AS |
2330 | } |
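| | 
| /* | 
| * Example of the max_ctx_offset check above (hypothetical numbers): a | 
| * tracepoint with num_args = 2 exposes 16 bytes of ctx, so a program | 
| * whose verifier-recorded max_ctx_offset is 24 is rejected with -EINVAL. | 
| */ | 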
2331 | ||
2332 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2333 | { | |
e16ec340 | 2334 | return __bpf_probe_register(btp, prog); |
c4f6699d AS |
2335 | } |
2336 | ||
2337 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | |
2338 | { | |
e16ec340 | 2339 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
c4f6699d | 2340 | } |
41bdc4b4 YS |
2341 | |
2342 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | |
2343 | u32 *fd_type, const char **buf, | |
2344 | u64 *probe_offset, u64 *probe_addr) | |
2345 | { | |
2346 | bool is_tracepoint, is_syscall_tp; | |
2347 | struct bpf_prog *prog; | |
2348 | int flags, err = 0; | |
2349 | ||
2350 | prog = event->prog; | |
2351 | if (!prog) | |
2352 | return -ENOENT; | |
2353 | ||
2354 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ | |
2355 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) | |
2356 | return -EOPNOTSUPP; | |
2357 | ||
2358 | *prog_id = prog->aux->id; | |
2359 | flags = event->tp_event->flags; | |
2360 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; | |
2361 | is_syscall_tp = is_syscall_trace_event(event->tp_event); | |
2362 | ||
2363 | if (is_tracepoint || is_syscall_tp) { | |
2364 | *buf = is_tracepoint ? event->tp_event->tp->name | |
2365 | : event->tp_event->name; | |
2366 | *fd_type = BPF_FD_TYPE_TRACEPOINT; | |
2367 | *probe_offset = 0x0; | |
2368 | *probe_addr = 0x0; | |
2369 | } else { | |
2370 | /* kprobe/uprobe */ | |
2371 | err = -EOPNOTSUPP; | |
2372 | #ifdef CONFIG_KPROBE_EVENTS | |
2373 | if (flags & TRACE_EVENT_FL_KPROBE) | |
2374 | err = bpf_get_kprobe_info(event, fd_type, buf, | |
2375 | probe_offset, probe_addr, | |
2376 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
2377 | #endif | |
2378 | #ifdef CONFIG_UPROBE_EVENTS | |
2379 | if (flags & TRACE_EVENT_FL_UPROBE) | |
2380 | err = bpf_get_uprobe_info(event, fd_type, buf, | |
2381 | probe_offset, | |
2382 | event->attr.type == PERF_TYPE_TRACEPOINT); | |
2383 | #endif | |
2384 | } | |
2385 | ||
2386 | return err; | |
2387 | } | |
a38d1107 | 2388 | |
9db1ff0a YS |
2389 | static int __init send_signal_irq_work_init(void) |
2390 | { | |
2391 | int cpu; | |
2392 | struct send_signal_irq_work *work; | |
2393 | ||
2394 | for_each_possible_cpu(cpu) { | |
2395 | work = per_cpu_ptr(&send_signal_work, cpu); | |
2396 | init_irq_work(&work->irq_work, do_bpf_send_signal); | |
2397 | } | |
2398 | return 0; | |
2399 | } | |
2400 | ||
2401 | subsys_initcall(send_signal_irq_work_init); | |
2402 | ||
a38d1107 | 2403 | #ifdef CONFIG_MODULES |
390e99cf SF |
2404 | static int bpf_event_notify(struct notifier_block *nb, unsigned long op, |
2405 | void *module) | |
a38d1107 MM |
2406 | { |
2407 | struct bpf_trace_module *btm, *tmp; | |
2408 | struct module *mod = module; | |
0340a6b7 | 2409 | int ret = 0; |
a38d1107 MM |
2410 | |
2411 | if (mod->num_bpf_raw_events == 0 || | |
2412 | (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) | |
0340a6b7 | 2413 | goto out; |
a38d1107 MM |
2414 | |
2415 | mutex_lock(&bpf_module_mutex); | |
2416 | ||
2417 | switch (op) { | |
2418 | case MODULE_STATE_COMING: | |
2419 | btm = kzalloc(sizeof(*btm), GFP_KERNEL); | |
2420 | if (btm) { | |
2421 | btm->module = module; | |
2422 | list_add(&btm->list, &bpf_trace_modules); | |
0340a6b7 PZ |
2423 | } else { |
2424 | ret = -ENOMEM; | |
a38d1107 MM |
2425 | } |
2426 | break; | |
2427 | case MODULE_STATE_GOING: | |
2428 | list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { | |
2429 | if (btm->module == module) { | |
2430 | list_del(&btm->list); | |
2431 | kfree(btm); | |
2432 | break; | |
2433 | } | |
2434 | } | |
2435 | break; | |
2436 | } | |
2437 | ||
2438 | mutex_unlock(&bpf_module_mutex); | |
2439 | ||
0340a6b7 PZ |
2440 | out: |
2441 | return notifier_from_errno(ret); | |
a38d1107 MM |
2442 | } |
2443 | ||
2444 | static struct notifier_block bpf_module_nb = { | |
2445 | .notifier_call = bpf_event_notify, | |
2446 | }; | |
2447 | ||
390e99cf | 2448 | static int __init bpf_event_init(void) |
a38d1107 MM |
2449 | { |
2450 | register_module_notifier(&bpf_module_nb); | |
2451 | return 0; | |
2452 | } | |
2453 | ||
2454 | fs_initcall(bpf_event_init); | |
2455 | #endif /* CONFIG_MODULES */ | |
0dcac272 JO |
2456 | |
2457 | #ifdef CONFIG_FPROBE | |
2458 | struct bpf_kprobe_multi_link { | |
2459 | struct bpf_link link; | |
2460 | struct fprobe fp; | |
2461 | unsigned long *addrs; | |
ca74823c JO |
2462 | u64 *cookies; |
2463 | u32 cnt; | |
e22061b2 JO |
2464 | u32 mods_cnt; |
2465 | struct module **mods; | |
0dcac272 JO |
2466 | }; |
2467 | ||
f7098690 JO |
2468 | struct bpf_kprobe_multi_run_ctx { |
2469 | struct bpf_run_ctx run_ctx; | |
2470 | struct bpf_kprobe_multi_link *link; | |
2471 | unsigned long entry_ip; | |
2472 | }; | |
2473 | ||
0236fec5 JO |
2474 | struct user_syms { |
2475 | const char **syms; | |
2476 | char *buf; | |
2477 | }; | |
2478 | ||
2479 | static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) | |
2480 | { | |
2481 | unsigned long __user usymbol; | |
2482 | const char **syms = NULL; | |
2483 | char *buf = NULL, *p; | |
2484 | int err = -ENOMEM; | |
2485 | unsigned int i; | |
2486 | ||
fd58f7df | 2487 | syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); |
0236fec5 JO |
2488 | if (!syms) |
2489 | goto error; | |
2490 | ||
fd58f7df | 2491 | buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); |
0236fec5 JO |
2492 | if (!buf) |
2493 | goto error; | |
2494 | ||
2495 | for (p = buf, i = 0; i < cnt; i++) { | |
2496 | if (__get_user(usymbol, usyms + i)) { | |
2497 | err = -EFAULT; | |
2498 | goto error; | |
2499 | } | |
2500 | err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); | |
2501 | if (err == KSYM_NAME_LEN) | |
2502 | err = -E2BIG; | |
2503 | if (err < 0) | |
2504 | goto error; | |
2505 | syms[i] = p; | |
2506 | p += err + 1; | |
2507 | } | |
2508 | ||
2509 | us->syms = syms; | |
2510 | us->buf = buf; | |
2511 | return 0; | |
2512 | ||
2513 | error: | |
2514 | if (err) { | |
2515 | kvfree(syms); | |
2516 | kvfree(buf); | |
2517 | } | |
2518 | return err; | |
2519 | } | |
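| | 
| /* | 
| * Resulting layout, with illustrative symbol names: buf packs the | 
| * NUL-terminated strings back to back and syms[] points into it: | 
| * | 
| *	buf:  "vfs_read\0vfs_write\0" | 
| *	syms: { &buf[0], &buf[9] } | 
| */ | 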
2520 | ||
e22061b2 JO |
2521 | static void kprobe_multi_put_modules(struct module **mods, u32 cnt) |
2522 | { | |
2523 | u32 i; | |
2524 | ||
2525 | for (i = 0; i < cnt; i++) | |
2526 | module_put(mods[i]); | |
2527 | } | |
2528 | ||
0236fec5 JO |
2529 | static void free_user_syms(struct user_syms *us) |
2530 | { | |
2531 | kvfree(us->syms); | |
2532 | kvfree(us->buf); | |
2533 | } | |
2534 | ||
0dcac272 JO |
2535 | static void bpf_kprobe_multi_link_release(struct bpf_link *link) |
2536 | { | |
2537 | struct bpf_kprobe_multi_link *kmulti_link; | |
2538 | ||
2539 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); | |
2540 | unregister_fprobe(&kmulti_link->fp); | |
e22061b2 | 2541 | kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); |
0dcac272 JO |
2542 | } |
2543 | ||
2544 | static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) | |
2545 | { | |
2546 | struct bpf_kprobe_multi_link *kmulti_link; | |
2547 | ||
2548 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); | |
2549 | kvfree(kmulti_link->addrs); | |
ca74823c | 2550 | kvfree(kmulti_link->cookies); |
e22061b2 | 2551 | kfree(kmulti_link->mods); |
0dcac272 JO |
2552 | kfree(kmulti_link); |
2553 | } | |
2554 | ||
2555 | static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { | |
2556 | .release = bpf_kprobe_multi_link_release, | |
2557 | .dealloc = bpf_kprobe_multi_link_dealloc, | |
2558 | }; | |
2559 | ||
ca74823c JO |
2560 | static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) |
2561 | { | |
2562 | const struct bpf_kprobe_multi_link *link = priv; | |
2563 | unsigned long *addr_a = a, *addr_b = b; | |
2564 | u64 *cookie_a, *cookie_b; | |
ca74823c JO |
2565 | |
2566 | cookie_a = link->cookies + (addr_a - link->addrs); | |
2567 | cookie_b = link->cookies + (addr_b - link->addrs); | |
2568 | ||
2569 | /* swap addr_a/addr_b and cookie_a/cookie_b values */ | |
11e17ae4 JC |
2570 | swap(*addr_a, *addr_b); |
2571 | swap(*cookie_a, *cookie_b); | |
ca74823c JO |
2572 | } |
2573 | ||
1a1b0716 | 2574 | static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) |
ca74823c JO |
2575 | { |
2576 | const unsigned long *addr_a = a, *addr_b = b; | |
2577 | ||
2578 | if (*addr_a == *addr_b) | |
2579 | return 0; | |
2580 | return *addr_a < *addr_b ? -1 : 1; | |
2581 | } | |
2582 | ||
2583 | static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) | |
2584 | { | |
1a1b0716 | 2585 | return bpf_kprobe_multi_addrs_cmp(a, b); |
ca74823c JO |
2586 | } |
2587 | ||
f7098690 | 2588 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) |
ca74823c | 2589 | { |
f7098690 | 2590 | struct bpf_kprobe_multi_run_ctx *run_ctx; |
ca74823c | 2591 | struct bpf_kprobe_multi_link *link; |
f7098690 | 2592 | u64 *cookie, entry_ip; |
ca74823c | 2593 | unsigned long *addr; |
ca74823c JO |
2594 | |
2595 | if (WARN_ON_ONCE(!ctx)) | |
2596 | return 0; | |
f7098690 JO |
2597 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); |
2598 | link = run_ctx->link; | |
ca74823c JO |
2599 | if (!link->cookies) |
2600 | return 0; | |
f7098690 JO |
2601 | entry_ip = run_ctx->entry_ip; |
2602 | addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), | |
1a1b0716 | 2603 | bpf_kprobe_multi_addrs_cmp); |
ca74823c JO |
2604 | if (!addr) |
2605 | return 0; | |
2606 | cookie = link->cookies + (addr - link->addrs); | |
2607 | return *cookie; | |
2608 | } | |
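| | 
| /* | 
| * Example with hypothetical values: addrs and cookies are parallel | 
| * arrays kept sorted together (see the sort_r() call in | 
| * bpf_kprobe_multi_link_attach()), so the bsearch() hit above indexes | 
| * straight into cookies: | 
| * | 
| *	addrs:   { 0xf000, 0xf100, 0xf200 } | 
| *	cookies: {      7,      1,      3 } | 
| */ | 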
2609 | ||
f7098690 JO |
2610 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) |
2611 | { | |
2612 | struct bpf_kprobe_multi_run_ctx *run_ctx; | |
2613 | ||
2614 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); | |
2615 | return run_ctx->entry_ip; | |
2616 | } | |
2617 | ||
0dcac272 JO |
2618 | static int |
2619 | kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, | |
f7098690 | 2620 | unsigned long entry_ip, struct pt_regs *regs) |
0dcac272 | 2621 | { |
f7098690 JO |
2622 | struct bpf_kprobe_multi_run_ctx run_ctx = { |
2623 | .link = link, | |
2624 | .entry_ip = entry_ip, | |
2625 | }; | |
ca74823c | 2626 | struct bpf_run_ctx *old_run_ctx; |
0dcac272 JO |
2627 | int err; |
2628 | ||
2629 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { | |
2630 | err = 0; | |
2631 | goto out; | |
2632 | } | |
2633 | ||
2634 | migrate_disable(); | |
2635 | rcu_read_lock(); | |
f7098690 | 2636 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
0dcac272 | 2637 | err = bpf_prog_run(link->link.prog, regs); |
ca74823c | 2638 | bpf_reset_run_ctx(old_run_ctx); |
0dcac272 JO |
2639 | rcu_read_unlock(); |
2640 | migrate_enable(); | |
2641 | ||
2642 | out: | |
2643 | __this_cpu_dec(bpf_prog_active); | |
2644 | return err; | |
2645 | } | |
2646 | ||
2647 | static void | |
c09eb2e5 | 2648 | kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, |
0dcac272 JO |
2649 | struct pt_regs *regs) |
2650 | { | |
0dcac272 JO |
2651 | struct bpf_kprobe_multi_link *link; |
2652 | ||
0dcac272 | 2653 | link = container_of(fp, struct bpf_kprobe_multi_link, fp); |
c09eb2e5 | 2654 | kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); |
0dcac272 JO |
2655 | } |
2656 | ||
eb5fb032 | 2657 | static int symbols_cmp_r(const void *a, const void *b, const void *priv) |
0dcac272 | 2658 | { |
0236fec5 JO |
2659 | const char **str_a = (const char **) a; |
2660 | const char **str_b = (const char **) b; | |
0dcac272 | 2661 | |
0236fec5 | 2662 | return strcmp(*str_a, *str_b); |
0dcac272 JO |
2663 | } |
2664 | ||
eb5fb032 JO |
2665 | struct multi_symbols_sort { |
2666 | const char **funcs; | |
2667 | u64 *cookies; | |
2668 | }; | |
2669 | ||
2670 | static void symbols_swap_r(void *a, void *b, int size, const void *priv) | |
2671 | { | |
2672 | const struct multi_symbols_sort *data = priv; | |
2673 | const char **name_a = a, **name_b = b; | |
2674 | ||
2675 | swap(*name_a, *name_b); | |
2676 | ||
2677 | /* If defined, also swap the related cookies. */ | 
2678 | if (data->cookies) { | |
2679 | u64 *cookie_a, *cookie_b; | |
2680 | ||
2681 | cookie_a = data->cookies + (name_a - data->funcs); | |
2682 | cookie_b = data->cookies + (name_b - data->funcs); | |
2683 | swap(*cookie_a, *cookie_b); | |
2684 | } | |
2685 | } | |
2686 | ||
6a5f2d6e | 2687 | struct modules_array { |
e22061b2 JO |
2688 | struct module **mods; |
2689 | int mods_cnt; | |
2690 | int mods_cap; | |
2691 | }; | |
2692 | ||
6a5f2d6e | 2693 | static int add_module(struct modules_array *arr, struct module *mod) |
e22061b2 | 2694 | { |
e22061b2 JO |
2695 | struct module **mods; |
2696 | ||
6a5f2d6e JO |
2697 | if (arr->mods_cnt == arr->mods_cap) { |
2698 | arr->mods_cap = max(16, arr->mods_cap * 3 / 2); | |
2699 | mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL); | |
e22061b2 JO |
2700 | if (!mods) |
2701 | return -ENOMEM; | |
6a5f2d6e | 2702 | arr->mods = mods; |
e22061b2 JO |
2703 | } |
2704 | ||
6a5f2d6e JO |
2705 | arr->mods[arr->mods_cnt] = mod; |
2706 | arr->mods_cnt++; | |
e22061b2 JO |
2707 | return 0; |
2708 | } | |
2709 | ||
6a5f2d6e JO |
2710 | static bool has_module(struct modules_array *arr, struct module *mod) |
2711 | { | |
2712 | int i; | |
2713 | ||
2714 | for (i = arr->mods_cnt - 1; i >= 0; i--) { | |
2715 | if (arr->mods[i] == mod) | |
2716 | return true; | |
2717 | } | |
2718 | return false; | |
2719 | } | |
2720 | ||
e22061b2 JO |
2721 | static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) |
2722 | { | |
6a5f2d6e JO |
2723 | struct modules_array arr = {}; |
2724 | u32 i, err = 0; | |
2725 | ||
2726 | for (i = 0; i < addrs_cnt; i++) { | |
2727 | struct module *mod; | |
2728 | ||
2729 | preempt_disable(); | |
2730 | mod = __module_address(addrs[i]); | |
2731 | /* Either no module or it's already stored */ | 
2732 | if (!mod || has_module(&arr, mod)) { | |
2733 | preempt_enable(); | |
2734 | continue; | |
2735 | } | |
2736 | if (!try_module_get(mod)) | |
2737 | err = -EINVAL; | |
2738 | preempt_enable(); | |
2739 | if (err) | |
2740 | break; | |
2741 | err = add_module(&arr, mod); | |
2742 | if (err) { | |
2743 | module_put(mod); | |
2744 | break; | |
2745 | } | |
2746 | } | |
e22061b2 JO |
2747 | |
2748 | /* We return either err < 0 in case of error, ... */ | |
e22061b2 | 2749 | if (err) { |
6a5f2d6e JO |
2750 | kprobe_multi_put_modules(arr.mods, arr.mods_cnt); |
2751 | kfree(arr.mods); | |
e22061b2 JO |
2752 | return err; |
2753 | } | |
2754 | ||
2755 | /* or number of modules found if everything is ok. */ | |
6a5f2d6e JO |
2756 | *mods = arr.mods; |
2757 | return arr.mods_cnt; | |
e22061b2 JO |
2758 | } |
2759 | ||
0dcac272 JO |
2760 | int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) |
2761 | { | |
2762 | struct bpf_kprobe_multi_link *link = NULL; | |
2763 | struct bpf_link_primer link_primer; | |
ca74823c | 2764 | void __user *ucookies; |
0dcac272 JO |
2765 | unsigned long *addrs; |
2766 | u32 flags, cnt, size; | |
2767 | void __user *uaddrs; | |
ca74823c | 2768 | u64 *cookies = NULL; |
0dcac272 JO |
2769 | void __user *usyms; |
2770 | int err; | |
2771 | ||
2772 | /* no support for 32bit archs yet */ | |
2773 | if (sizeof(u64) != sizeof(void *)) | |
2774 | return -EOPNOTSUPP; | |
2775 | ||
2776 | if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) | |
2777 | return -EINVAL; | |
2778 | ||
2779 | flags = attr->link_create.kprobe_multi.flags; | |
2780 | if (flags & ~BPF_F_KPROBE_MULTI_RETURN) | |
2781 | return -EINVAL; | |
2782 | ||
2783 | uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); | |
2784 | usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); | |
2785 | if (!!uaddrs == !!usyms) | |
2786 | return -EINVAL; | |
2787 | ||
2788 | cnt = attr->link_create.kprobe_multi.cnt; | |
2789 | if (!cnt) | |
2790 | return -EINVAL; | |
2791 | ||
2792 | size = cnt * sizeof(*addrs); | |
fd58f7df | 2793 | addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); |
0dcac272 JO |
2794 | if (!addrs) |
2795 | return -ENOMEM; | |
2796 | ||
eb5fb032 JO |
2797 | ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); |
2798 | if (ucookies) { | |
2799 | cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL); | 
2800 | if (!cookies) { | |
2801 | err = -ENOMEM; | |
2802 | goto error; | |
2803 | } | |
2804 | if (copy_from_user(cookies, ucookies, size)) { | |
2805 | err = -EFAULT; | |
2806 | goto error; | |
2807 | } | |
2808 | } | |
2809 | ||
0dcac272 JO |
2810 | if (uaddrs) { |
2811 | if (copy_from_user(addrs, uaddrs, size)) { | |
2812 | err = -EFAULT; | |
2813 | goto error; | |
2814 | } | |
2815 | } else { | |
eb5fb032 JO |
2816 | struct multi_symbols_sort data = { |
2817 | .cookies = cookies, | |
2818 | }; | |
0236fec5 JO |
2819 | struct user_syms us; |
2820 | ||
2821 | err = copy_user_syms(&us, usyms, cnt); | |
2822 | if (err) | |
2823 | goto error; | |
2824 | ||
eb5fb032 JO |
2825 | if (cookies) |
2826 | data.funcs = us.syms; | |
2827 | ||
2828 | sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, | |
2829 | symbols_swap_r, &data); | |
2830 | ||
0236fec5 JO |
2831 | err = ftrace_lookup_symbols(us.syms, cnt, addrs); |
2832 | free_user_syms(&us); | |
0dcac272 JO |
2833 | if (err) |
2834 | goto error; | |
2835 | } | |
2836 | ||
2837 | link = kzalloc(sizeof(*link), GFP_KERNEL); | |
2838 | if (!link) { | |
2839 | err = -ENOMEM; | |
2840 | goto error; | |
2841 | } | |
2842 | ||
2843 | bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, | |
2844 | &bpf_kprobe_multi_link_lops, prog); | |
2845 | ||
2846 | err = bpf_link_prime(&link->link, &link_primer); | |
2847 | if (err) | |
2848 | goto error; | |
2849 | ||
2850 | if (flags & BPF_F_KPROBE_MULTI_RETURN) | |
2851 | link->fp.exit_handler = kprobe_multi_link_handler; | |
2852 | else | |
2853 | link->fp.entry_handler = kprobe_multi_link_handler; | |
2854 | ||
2855 | link->addrs = addrs; | |
ca74823c JO |
2856 | link->cookies = cookies; |
2857 | link->cnt = cnt; | |
2858 | ||
2859 | if (cookies) { | |
2860 | /* | |
2861 | * Sorting addresses will trigger sorting cookies as well | |
2862 | * (check bpf_kprobe_multi_cookie_swap). This way we can | |
2863 | * find the cookie based on the address in the | 
2864 | * bpf_get_attach_cookie helper. | 
2865 | */ | |
2866 | sort_r(addrs, cnt, sizeof(*addrs), | |
2867 | bpf_kprobe_multi_cookie_cmp, | |
2868 | bpf_kprobe_multi_cookie_swap, | |
2869 | link); | |
e22061b2 JO |
2870 | } |
2871 | ||
2872 | err = get_modules_for_addrs(&link->mods, addrs, cnt); | |
2873 | if (err < 0) { | |
2874 | bpf_link_cleanup(&link_primer); | |
2875 | return err; | |
ca74823c | 2876 | } |
e22061b2 | 2877 | link->mods_cnt = err; |
0dcac272 JO |
2878 | |
2879 | err = register_fprobe_ips(&link->fp, addrs, cnt); | |
2880 | if (err) { | |
e22061b2 | 2881 | kprobe_multi_put_modules(link->mods, link->mods_cnt); |
0dcac272 JO |
2882 | bpf_link_cleanup(&link_primer); |
2883 | return err; | |
2884 | } | |
2885 | ||
2886 | return bpf_link_settle(&link_primer); | |
2887 | ||
2888 | error: | |
2889 | kfree(link); | |
2890 | kvfree(addrs); | |
ca74823c | 2891 | kvfree(cookies); |
0dcac272 JO |
2892 | return err; |
2893 | } | |
2894 | #else /* !CONFIG_FPROBE */ | |
2895 | int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) | |
2896 | { | |
2897 | return -EOPNOTSUPP; | |
2898 | } | |
f7098690 JO |
2899 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) |
2900 | { | |
2901 | return 0; | |
2902 | } | |
2903 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) | |
ca74823c JO |
2904 | { |
2905 | return 0; | |
2906 | } | |
0dcac272 | 2907 | #endif |