Commit | Line | Data |
---|---|---|
bcea3f96 | 1 | // SPDX-License-Identifier: GPL-2.0 |
f3f096cf SD |
2 | /* |
3 | * uprobes-based tracing events | |
4 | * | |
f3f096cf SD |
5 | * Copyright (C) IBM Corporation, 2010-2012 |
6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | |
7 | */ | |
ea6eb5e7 | 8 | #define pr_fmt(fmt) "trace_uprobe: " fmt |
f3f096cf | 9 | |
aef2feda | 10 | #include <linux/bpf-cgroup.h> |
17911ff3 | 11 | #include <linux/security.h> |
0597c49c | 12 | #include <linux/ctype.h> |
f3f096cf SD |
13 | #include <linux/module.h> |
14 | #include <linux/uaccess.h> | |
15 | #include <linux/uprobes.h> | |
16 | #include <linux/namei.h> | |
b2e902f0 | 17 | #include <linux/string.h> |
b2d09103 | 18 | #include <linux/rculist.h> |
8c7dcb84 | 19 | #include <linux/filter.h> |
10cdb82a | 20 | #include <linux/percpu.h> |
f3f096cf | 21 | |
0597c49c | 22 | #include "trace_dynevent.h" |
f3f096cf | 23 | #include "trace_probe.h" |
53305928 | 24 | #include "trace_probe_tmpl.h" |
f3f096cf SD |
25 | |
26 | #define UPROBE_EVENT_SYSTEM "uprobes" | |
27 | ||
457d1772 ON |
28 | struct uprobe_trace_entry_head { |
29 | struct trace_entry ent; | |
30 | unsigned long vaddr[]; | |
31 | }; | |
32 | ||
33 | #define SIZEOF_TRACE_ENTRY(is_return) \ | |
34 | (sizeof(struct uprobe_trace_entry_head) + \ | |
35 | sizeof(unsigned long) * (is_return ? 2 : 1)) | |
36 | ||
37 | #define DATAOF_TRACE_ENTRY(entry, is_return) \ | |
38 | ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return)) | |
39 | ||
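A sketch of the record layout these two macros imply (an illustrative comment, not part of the original file; the vaddr[] meanings are taken from __uprobe_trace_func() further down):

```c
/*
 * Layout implied by SIZEOF_TRACE_ENTRY()/DATAOF_TRACE_ENTRY():
 *
 *   is_return == false                  is_return == true
 *   +------------------------+          +------------------------+
 *   | struct trace_entry ent |          | struct trace_entry ent |
 *   | vaddr[0] = probe IP    |          | vaddr[0] = func addr   |
 *   +------------------------+          | vaddr[1] = return IP   |
 *   | fetched args ...       |          +------------------------+
 *   +------------------------+          | fetched args ...       |
 *                                       +------------------------+
 *
 * DATAOF_TRACE_ENTRY() points at "fetched args" in both cases.
 */
```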
d262271d | 40 | static int trace_uprobe_create(const char *raw_command); |
0597c49c MH |
41 | static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev); |
42 | static int trace_uprobe_release(struct dyn_event *ev); | |
43 | static bool trace_uprobe_is_busy(struct dyn_event *ev); | |
44 | static bool trace_uprobe_match(const char *system, const char *event, | |
30199137 | 45 | int argc, const char **argv, struct dyn_event *ev); |
0597c49c MH |
46 | |
47 | static struct dyn_event_operations trace_uprobe_ops = { | |
48 | .create = trace_uprobe_create, | |
49 | .show = trace_uprobe_show, | |
50 | .is_busy = trace_uprobe_is_busy, | |
51 | .free = trace_uprobe_release, | |
52 | .match = trace_uprobe_match, | |
53 | }; | |
54 | ||
f3f096cf SD |
55 | /* |
56 | * uprobe event core functions | |
57 | */ | |
f3f096cf | 58 | struct trace_uprobe { |
0597c49c | 59 | struct dyn_event devent; |
a932b738 | 60 | struct uprobe_consumer consumer; |
0c92c7a3 | 61 | struct path path; |
f3f096cf | 62 | char *filename; |
3c83a9ad | 63 | struct uprobe *uprobe; |
f3f096cf | 64 | unsigned long offset; |
1cc33161 | 65 | unsigned long ref_ctr_offset; |
10cdb82a | 66 | unsigned long __percpu *nhits; |
14577c39 | 67 | struct trace_probe tp; |
f3f096cf SD |
68 | }; |
69 | ||
0597c49c MH |
70 | static bool is_trace_uprobe(struct dyn_event *ev) |
71 | { | |
72 | return ev->ops == &trace_uprobe_ops; | |
73 | } | |
74 | ||
75 | static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev) | |
76 | { | |
77 | return container_of(ev, struct trace_uprobe, devent); | |
78 | } | |
79 | ||
80 | /** | |
81 | * for_each_trace_uprobe - iterate over the trace_uprobe list | |
82 | * @pos: the struct trace_uprobe * for each entry | |
83 | * @dpos: the struct dyn_event * to use as a loop cursor | |
84 | */ | |
85 | #define for_each_trace_uprobe(pos, dpos) \ | |
86 | for_each_dyn_event(dpos) \ | |
87 | if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos))) | |
88 | ||
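A hedged usage sketch of the iterator defined above; it mirrors how find_probe_event() and validate_ref_ctr_offset() below walk the list:

```c
/*
 * Typical use (sketch only):
 *
 *	struct dyn_event *pos;
 *	struct trace_uprobe *tu;
 *
 *	for_each_trace_uprobe(tu, pos)
 *		pr_info("%s: %s+0x%lx\n",
 *			trace_probe_name(&tu->tp), tu->filename, tu->offset);
 */
```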
f3f096cf | 89 | static int register_uprobe_event(struct trace_uprobe *tu); |
c6c2401d | 90 | static int unregister_uprobe_event(struct trace_uprobe *tu); |
f3f096cf | 91 | |
da09a9e0 JO |
92 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs, |
93 | __u64 *data); | |
c1ae5c75 | 94 | static int uretprobe_dispatcher(struct uprobe_consumer *con, |
da09a9e0 JO |
95 | unsigned long func, struct pt_regs *regs, |
96 | __u64 *data); | |
f3f096cf | 97 | |
3fd996a2 NK |
98 | #ifdef CONFIG_STACK_GROWSUP |
99 | static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) | |
100 | { | |
101 | return addr - (n * sizeof(long)); | |
102 | } | |
103 | #else | |
104 | static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) | |
105 | { | |
106 | return addr + (n * sizeof(long)); | |
107 | } | |
108 | #endif | |
109 | ||
110 | static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) | |
111 | { | |
112 | unsigned long ret; | |
113 | unsigned long addr = user_stack_pointer(regs); | |
114 | ||
115 | addr = adjust_stack_addr(addr, n); | |
116 | ||
117 | if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret))) | |
118 | return 0; | |
119 | ||
120 | return ret; | |
121 | } | |
122 | ||
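A worked reading of the two helpers above, with hypothetical values and assuming a 64-bit architecture whose stack grows down:

```c
/*
 * Example (hypothetical, sizeof(long) == 8, stack grows down):
 *   user_stack_pointer(regs)    == 0x7ffd1000
 *   get_user_stack_nth(regs, 3) reads 8 bytes at 0x7ffd1000 + 3 * 8
 *                               == 0x7ffd1018, or returns 0 if the
 *                               copy_from_user() fails.
 */
```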
123 | /* | |
124 | * Uprobes-specific fetch functions | |
125 | */ | |
53305928 | 126 | static nokprobe_inline int |
9b960a38 | 127 | probe_mem_read(void *dest, void *src, size_t size) |
53305928 MH |
128 | { |
129 | void __user *vaddr = (void __force __user *)src; | |
130 | ||
f3f58935 | 131 | return copy_from_user(dest, vaddr, size) ? -EFAULT : 0; |
5baaa59e | 132 | } |
e65f7ae7 MH |
133 | |
134 | static nokprobe_inline int | |
135 | probe_mem_read_user(void *dest, void *src, size_t size) | |
136 | { | |
137 | return probe_mem_read(dest, src, size); | |
138 | } | |
139 | ||
5baaa59e NK |
140 | /* |
141 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | |
142 | * length and relative data location. | |
143 | */ | |
9178412d MH |
144 | static nokprobe_inline int |
145 | fetch_store_string(unsigned long addr, void *dest, void *base) | |
5baaa59e NK |
146 | { |
147 | long ret; | |
9178412d MH |
148 | u32 loc = *(u32 *)dest; |
149 | int maxlen = get_loc_len(loc); | |
150 | u8 *dst = get_loc_data(dest, base); | |
5baaa59e NK |
151 | void __user *src = (void __force __user *) addr; |
152 | ||
9178412d MH |
153 | if (unlikely(!maxlen)) |
154 | return -ENOMEM; | |
5baaa59e | 155 | |
4dd537ac | 156 | if (addr == FETCH_TOKEN_COMM) |
8a3750ec | 157 | ret = strscpy(dst, current->comm, maxlen); |
4dd537ac MH |
158 | else |
159 | ret = strncpy_from_user(dst, src, maxlen); | |
9178412d MH |
160 | if (ret >= 0) { |
161 | if (ret == maxlen) | |
162 | dst[ret - 1] = '\0'; | |
0722069a AZ |
163 | else |
164 | /* | |
165 | * Include the terminating null byte. In this case it | |
166 | * was copied by strncpy_from_user but not accounted | |
167 | * for in ret. | |
168 | */ | |
169 | ret++; | |
9178412d | 170 | *(u32 *)dest = make_data_loc(ret, (void *)dst - base); |
797311bc MHG |
171 | } else |
172 | *(u32 *)dest = make_data_loc(0, (void *)dst - base); | |
9178412d MH |
173 | |
174 | return ret; | |
5baaa59e NK |
175 | } |
176 | ||
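A hedged userspace sketch of decoding the data-loc word written above, assuming make_data_loc() packs the copied length into the upper 16 bits and the record-relative offset into the lower 16 bits (both the packing and the record layout are assumptions, not shown in this file):

```c
#include <stdint.h>
#include <stdio.h>

/* Decode one string argument out of a raw trace record (sketch). */
static void print_string_arg(const void *record, uint32_t data_loc)
{
	uint32_t len  = data_loc >> 16;      /* bytes copied, incl. trailing NUL (assumed) */
	uint32_t offs = data_loc & 0xffff;   /* offset from the record start (assumed) */
	const char *s = (const char *)record + offs;

	if (len)
		printf("%s\n", s);           /* NUL-terminated within len bytes */
}
```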
88903c46 MH |
177 | static nokprobe_inline int |
178 | fetch_store_string_user(unsigned long addr, void *dest, void *base) | |
179 | { | |
180 | return fetch_store_string(addr, dest, base); | |
181 | } | |
182 | ||
53305928 | 183 | /* Return the length of the string -- including the terminating null byte */ |

9178412d MH |
184 | static nokprobe_inline int |
185 | fetch_store_strlen(unsigned long addr) | |
5baaa59e NK |
186 | { |
187 | int len; | |
188 | void __user *vaddr = (void __force __user *) addr; | |
189 | ||
4dd537ac MH |
190 | if (addr == FETCH_TOKEN_COMM) |
191 | len = strlen(current->comm) + 1; | |
192 | else | |
193 | len = strnlen_user(vaddr, MAX_STRING_SIZE); | |
5baaa59e | 194 | |
9178412d | 195 | return (len > MAX_STRING_SIZE) ? 0 : len; |
5baaa59e | 196 | } |
3fd996a2 | 197 | |
88903c46 MH |
198 | static nokprobe_inline int |
199 | fetch_store_strlen_user(unsigned long addr) | |
200 | { | |
201 | return fetch_store_strlen(addr); | |
202 | } | |
203 | ||
53305928 | 204 | static unsigned long translate_user_vaddr(unsigned long file_offset) |
b7e0bf34 NK |
205 | { |
206 | unsigned long base_addr; | |
207 | struct uprobe_dispatch_data *udd; | |
208 | ||
209 | udd = (void *) current->utask->vaddr; | |
210 | ||
211 | base_addr = udd->bp_addr - udd->tu->offset; | |
53305928 | 212 | return base_addr + file_offset; |
b7e0bf34 | 213 | } |
b7e0bf34 | 214 | |
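A worked example of the translation above, with hypothetical addresses, assuming the @+OFFSET fetch-arg form is what ends up as FETCH_OP_FOFFS:

```c
/*
 * Example (hypothetical numbers):
 *   tu->offset   = 0x1234          probe location as a file offset
 *   udd->bp_addr = 0x7f1200001234  where the breakpoint actually hit
 *   base_addr    = 0x7f1200000000
 *   so "@+0x40" is fetched from 0x7f1200000040 in the traced task.
 */
```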
53305928 MH |
215 | /* Note that we don't verify it, since the code does not come from user space */ |
216 | static int | |
25f00e40 MHG |
217 | process_fetch_insn(struct fetch_insn *code, void *rec, void *edata, |
218 | void *dest, void *base) | |
53305928 | 219 | { |
8565a45d | 220 | struct pt_regs *regs = rec; |
53305928 | 221 | unsigned long val; |
bd78acc8 | 222 | int ret; |
53305928 MH |
223 | |
224 | /* 1st stage: get value from context */ | |
225 | switch (code->op) { | |
226 | case FETCH_OP_REG: | |
227 | val = regs_get_register(regs, code->param); | |
228 | break; | |
229 | case FETCH_OP_STACK: | |
230 | val = get_user_stack_nth(regs, code->param); | |
231 | break; | |
232 | case FETCH_OP_STACKP: | |
233 | val = user_stack_pointer(regs); | |
234 | break; | |
235 | case FETCH_OP_RETVAL: | |
236 | val = regs_return_value(regs); | |
237 | break; | |
4dd537ac MH |
238 | case FETCH_OP_COMM: |
239 | val = FETCH_TOKEN_COMM; | |
240 | break; | |
53305928 MH |
241 | case FETCH_OP_FOFFS: |
242 | val = translate_user_vaddr(code->immediate); | |
243 | break; | |
244 | default: | |
bd78acc8 SC |
245 | ret = process_common_fetch_insn(code, &val); |
246 | if (ret < 0) | |
247 | return ret; | |
53305928 MH |
248 | } |
249 | code++; | |
250 | ||
9b960a38 | 251 | return process_fetch_insn_bottom(code, val, dest, base); |
53305928 MH |
252 | } |
253 | NOKPROBE_SYMBOL(process_fetch_insn) | |
254 | ||
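Roughly how the two-stage fetch plays out for one argument; this is a sketch that assumes the usual fetch_insn encoding from trace_probe_tmpl.h, since process_fetch_insn_bottom() is not part of this file:

```c
/*
 * Sketch for an argument like "+0(%si):string" (assumed encoding):
 *   1st stage (above):   FETCH_OP_REG, so val = regs_get_register(regs, si)
 *   2nd stage (bottom):  dereference + string store, i.e. read the user
 *                        string at val + 0 and record it as a data-loc
 *                        entry via fetch_store_string() above.
 */
```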
736288ba ON |
255 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) |
256 | { | |
257 | rwlock_init(&filter->rwlock); | |
258 | filter->nr_systemwide = 0; | |
259 | INIT_LIST_HEAD(&filter->perf_events); | |
260 | } | |
261 | ||
262 | static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter) | |
263 | { | |
264 | return !filter->nr_systemwide && list_empty(&filter->perf_events); | |
265 | } | |
266 | ||
c1ae5c75 ON |
267 | static inline bool is_ret_probe(struct trace_uprobe *tu) |
268 | { | |
269 | return tu->consumer.ret_handler != NULL; | |
270 | } | |
271 | ||
0597c49c MH |
272 | static bool trace_uprobe_is_busy(struct dyn_event *ev) |
273 | { | |
274 | struct trace_uprobe *tu = to_trace_uprobe(ev); | |
275 | ||
276 | return trace_probe_is_enabled(&tu->tp); | |
277 | } | |
278 | ||
ab10d69e MH |
279 | static bool trace_uprobe_match_command_head(struct trace_uprobe *tu, |
280 | int argc, const char **argv) | |
281 | { | |
282 | char buf[MAX_ARGSTR_LEN + 1]; | |
283 | int len; | |
284 | ||
285 | if (!argc) | |
286 | return true; | |
287 | ||
288 | len = strlen(tu->filename); | |
289 | if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':') | |
290 | return false; | |
291 | ||
292 | if (tu->ref_ctr_offset == 0) | |
293 | snprintf(buf, sizeof(buf), "0x%0*lx", | |
294 | (int)(sizeof(void *) * 2), tu->offset); | |
295 | else | |
296 | snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)", | |
297 | (int)(sizeof(void *) * 2), tu->offset, | |
298 | tu->ref_ctr_offset); | |
299 | if (strcmp(buf, &argv[0][len + 1])) | |
300 | return false; | |
301 | ||
302 | argc--; argv++; | |
303 | ||
304 | return trace_probe_match_command_args(&tu->tp, argc, argv); | |
305 | } | |
306 | ||
0597c49c | 307 | static bool trace_uprobe_match(const char *system, const char *event, |
30199137 | 308 | int argc, const char **argv, struct dyn_event *ev) |
0597c49c MH |
309 | { |
310 | struct trace_uprobe *tu = to_trace_uprobe(ev); | |
311 | ||
95c104c3 LY |
312 | return (event[0] == '\0' || |
313 | strcmp(trace_probe_name(&tu->tp), event) == 0) && | |
ab10d69e MH |
314 | (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) && |
315 | trace_uprobe_match_command_head(tu, argc, argv); | |
0597c49c MH |
316 | } |
317 | ||
60d53e2c MH |
318 | static nokprobe_inline struct trace_uprobe * |
319 | trace_uprobe_primary_from_call(struct trace_event_call *call) | |
320 | { | |
321 | struct trace_probe *tp; | |
322 | ||
323 | tp = trace_probe_primary_from_call(call); | |
324 | if (WARN_ON_ONCE(!tp)) | |
325 | return NULL; | |
326 | ||
327 | return container_of(tp, struct trace_uprobe, tp); | |
328 | } | |
329 | ||
f3f096cf SD |
330 | /* |
331 | * Allocate new trace_uprobe and initialize it (including uprobes). | |
332 | */ | |
333 | static struct trace_uprobe * | |
c1ae5c75 | 334 | alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) |
f3f096cf SD |
335 | { |
336 | struct trace_uprobe *tu; | |
455b2899 | 337 | int ret; |
f3f096cf | 338 | |
845cbf3e | 339 | tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL); |
f3f096cf SD |
340 | if (!tu) |
341 | return ERR_PTR(-ENOMEM); | |
342 | ||
10cdb82a AN |
343 | tu->nhits = alloc_percpu(unsigned long); |
344 | if (!tu->nhits) { | |
345 | ret = -ENOMEM; | |
346 | goto error; | |
347 | } | |
348 | ||
035ba760 | 349 | ret = trace_probe_init(&tu->tp, event, group, true, nargs); |
455b2899 | 350 | if (ret < 0) |
f3f096cf SD |
351 | goto error; |
352 | ||
0597c49c | 353 | dyn_event_init(&tu->devent, &trace_uprobe_ops); |
a932b738 | 354 | tu->consumer.handler = uprobe_dispatcher; |
c1ae5c75 ON |
355 | if (is_ret) |
356 | tu->consumer.ret_handler = uretprobe_dispatcher; | |
b61387cb | 357 | init_trace_uprobe_filter(tu->tp.event->filter); |
f3f096cf SD |
358 | return tu; |
359 | ||
360 | error: | |
10cdb82a | 361 | free_percpu(tu->nhits); |
f3f096cf SD |
362 | kfree(tu); |
363 | ||
455b2899 | 364 | return ERR_PTR(ret); |
f3f096cf SD |
365 | } |
366 | ||
367 | static void free_trace_uprobe(struct trace_uprobe *tu) | |
368 | { | |
0597c49c MH |
369 | if (!tu) |
370 | return; | |
371 | ||
0c92c7a3 | 372 | path_put(&tu->path); |
455b2899 | 373 | trace_probe_cleanup(&tu->tp); |
f3f096cf | 374 | kfree(tu->filename); |
10cdb82a | 375 | free_percpu(tu->nhits); |
f3f096cf SD |
376 | kfree(tu); |
377 | } | |
378 | ||
379 | static struct trace_uprobe *find_probe_event(const char *event, const char *group) | |
380 | { | |
0597c49c | 381 | struct dyn_event *pos; |
f3f096cf SD |
382 | struct trace_uprobe *tu; |
383 | ||
0597c49c | 384 | for_each_trace_uprobe(tu, pos) |
b55ce203 MH |
385 | if (strcmp(trace_probe_name(&tu->tp), event) == 0 && |
386 | strcmp(trace_probe_group_name(&tu->tp), group) == 0) | |
f3f096cf SD |
387 | return tu; |
388 | ||
389 | return NULL; | |
390 | } | |
391 | ||
0597c49c | 392 | /* Unregister a trace_uprobe and probe_event */ |
c6c2401d | 393 | static int unregister_trace_uprobe(struct trace_uprobe *tu) |
f3f096cf | 394 | { |
c6c2401d SRRH |
395 | int ret; |
396 | ||
41af3cf5 MH |
397 | if (trace_probe_has_sibling(&tu->tp)) |
398 | goto unreg; | |
399 | ||
1d18538e SRV |
400 | /* If there's a reference to the dynamic event */ |
401 | if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp))) | |
402 | return -EBUSY; | |
403 | ||
c6c2401d SRRH |
404 | ret = unregister_uprobe_event(tu); |
405 | if (ret) | |
406 | return ret; | |
407 | ||
41af3cf5 | 408 | unreg: |
0597c49c | 409 | dyn_event_remove(&tu->devent); |
41af3cf5 | 410 | trace_probe_unlink(&tu->tp); |
f3f096cf | 411 | free_trace_uprobe(tu); |
c6c2401d | 412 | return 0; |
f3f096cf SD |
413 | } |
414 | ||
fe60b0ce MH |
415 | static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig, |
416 | struct trace_uprobe *comp) | |
417 | { | |
418 | struct trace_probe_event *tpe = orig->tp.event; | |
fe60b0ce MH |
419 | struct inode *comp_inode = d_real_inode(comp->path.dentry); |
420 | int i; | |
421 | ||
e161c6bf | 422 | list_for_each_entry(orig, &tpe->probes, tp.list) { |
fe60b0ce MH |
423 | if (comp_inode != d_real_inode(orig->path.dentry) || |
424 | comp->offset != orig->offset) | |
425 | continue; | |
426 | ||
427 | /* | |
428 | * trace_probe_compare_arg_type() ensured that nr_args and | |
429 | * each argument name and type are same. Let's compare comm. | |
430 | */ | |
431 | for (i = 0; i < orig->tp.nr_args; i++) { | |
432 | if (strcmp(orig->tp.args[i].comm, | |
433 | comp->tp.args[i].comm)) | |
f8d7ab2b | 434 | break; |
fe60b0ce MH |
435 | } |
436 | ||
f8d7ab2b SD |
437 | if (i == orig->tp.nr_args) |
438 | return true; | |
fe60b0ce MH |
439 | } |
440 | ||
441 | return false; | |
442 | } | |
443 | ||
41af3cf5 MH |
444 | static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to) |
445 | { | |
446 | int ret; | |
447 | ||
fe60b0ce MH |
448 | ret = trace_probe_compare_arg_type(&tu->tp, &to->tp); |
449 | if (ret) { | |
450 | /* Note that the argument index starts at 2 */ | |
451 | trace_probe_log_set_index(ret + 1); | |
452 | trace_probe_log_err(0, DIFF_ARG_TYPE); | |
453 | return -EEXIST; | |
454 | } | |
455 | if (trace_uprobe_has_same_uprobe(to, tu)) { | |
456 | trace_probe_log_set_index(0); | |
457 | trace_probe_log_err(0, SAME_PROBE); | |
458 | return -EEXIST; | |
459 | } | |
460 | ||
41af3cf5 MH |
461 | /* Append to existing event */ |
462 | ret = trace_probe_append(&tu->tp, &to->tp); | |
463 | if (!ret) | |
8b0e6c74 | 464 | dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); |
41af3cf5 MH |
465 | |
466 | return ret; | |
467 | } | |
468 | ||
ccea8727 RB |
469 | /* | |
470 | * A uprobe with multiple reference counters is not allowed, i.e. | |
471 | * if the inode and offset match, the reference counter offset | |
472 | * *must* match as well. There is one exception: if the user is | |
473 | * replacing an old trace_uprobe with a new one (same group/event), | |
474 | * then we allow the same uprobe with a new reference counter as | |
475 | * long as the new one does not conflict with any other existing | |
476 | * ones. | |
477 | */ | |
41af3cf5 | 478 | static int validate_ref_ctr_offset(struct trace_uprobe *new) |
ccea8727 | 479 | { |
0597c49c | 480 | struct dyn_event *pos; |
41af3cf5 | 481 | struct trace_uprobe *tmp; |
ccea8727 RB |
482 | struct inode *new_inode = d_real_inode(new->path.dentry); |
483 | ||
0597c49c | 484 | for_each_trace_uprobe(tmp, pos) { |
41af3cf5 | 485 | if (new_inode == d_real_inode(tmp->path.dentry) && |
ccea8727 RB |
486 | new->offset == tmp->offset && |
487 | new->ref_ctr_offset != tmp->ref_ctr_offset) { | |
488 | pr_warn("Reference counter offset mismatch."); | |
41af3cf5 | 489 | return -EINVAL; |
ccea8727 RB |
490 | } |
491 | } | |
41af3cf5 | 492 | return 0; |
ccea8727 RB |
493 | } |
494 | ||
f3f096cf SD |
495 | /* Register a trace_uprobe and probe_event */ |
496 | static int register_trace_uprobe(struct trace_uprobe *tu) | |
497 | { | |
14577c39 | 498 | struct trace_uprobe *old_tu; |
f3f096cf SD |
499 | int ret; |
500 | ||
f8821732 | 501 | guard(mutex)(&event_mutex); |
f3f096cf | 502 | |
41af3cf5 MH |
503 | ret = validate_ref_ctr_offset(tu); |
504 | if (ret) | |
f8821732 | 505 | return ret; |
ccea8727 | 506 | |
41af3cf5 MH |
507 | /* register as an event */ |
508 | old_tu = find_probe_event(trace_probe_name(&tu->tp), | |
509 | trace_probe_group_name(&tu->tp)); | |
14577c39 | 510 | if (old_tu) { |
41af3cf5 MH |
511 | if (is_ret_probe(tu) != is_ret_probe(old_tu)) { |
512 | trace_probe_log_set_index(0); | |
513 | trace_probe_log_err(0, DIFF_PROBE_TYPE); | |
f8821732 | 514 | return -EEXIST; |
41af3cf5 | 515 | } |
f8821732 | 516 | return append_trace_uprobe(tu, old_tu); |
c6c2401d | 517 | } |
f3f096cf SD |
518 | |
519 | ret = register_uprobe_event(tu); | |
520 | if (ret) { | |
8e242060 MH |
521 | if (ret == -EEXIST) { |
522 | trace_probe_log_set_index(0); | |
523 | trace_probe_log_err(0, EVENT_EXIST); | |
524 | } else | |
525 | pr_warn("Failed to register probe event(%d)\n", ret); | |
f8821732 | 526 | return ret; |
f3f096cf SD |
527 | } |
528 | ||
8b0e6c74 | 529 | dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); |
f3f096cf | 530 | |
f3f096cf SD |
531 | return ret; |
532 | } | |
533 | ||
534 | /* | |
535 | * Argument syntax: | |
95c104c3 | 536 | * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS] |
f3f096cf | 537 | */ |
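A few illustrative definitions accepted by this grammar (event names, paths, offsets and the x86-64 register name are hypothetical); the sketch after the uprobe_events file_operations below shows how such a string reaches this parser from userspace:

```c
/*
 * Illustrative only:
 *   p:uprobes/my_open /usr/bin/bash:0x4245c0 fd=%di
 *   r:uprobes/my_open_ret /usr/bin/bash:0x4245c0 ret=$retval
 *   p /usr/bin/bash:0x4245c0(0x9f38)      with a reference counter offset
 */
```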
d262271d | 538 | static int __trace_uprobe_create(int argc, const char **argv) |
f3f096cf SD |
539 | { |
540 | struct trace_uprobe *tu; | |
0597c49c MH |
541 | const char *event = NULL, *group = UPROBE_EVENT_SYSTEM; |
542 | char *arg, *filename, *rctr, *rctr_end, *tmp; | |
f3f096cf | 543 | char buf[MAX_EVENT_NAME_LEN]; |
95c104c3 | 544 | char gbuf[MAX_EVENT_NAME_LEN]; |
007517a0 | 545 | enum probe_print_type ptype; |
f3f096cf | 546 | struct path path; |
1cc33161 | 547 | unsigned long offset, ref_ctr_offset; |
0597c49c | 548 | bool is_return = false; |
f3f096cf SD |
549 | int i, ret; |
550 | ||
1cc33161 | 551 | ref_ctr_offset = 0; |
f3f096cf | 552 | |
f01098c7 ET |
553 | switch (argv[0][0]) { |
554 | case 'r': | |
4ee5a52e | 555 | is_return = true; |
f01098c7 ET |
556 | break; |
557 | case 'p': | |
558 | break; | |
559 | default: | |
560 | return -ECANCELED; | |
561 | } | |
562 | ||
563 | if (argc < 2) | |
0597c49c | 564 | return -ECANCELED; |
57faaa04 MHG |
565 | |
566 | trace_probe_log_init("trace_uprobe", argc, argv); | |
567 | ||
568 | if (argc - 2 > MAX_TRACE_ARGS) { | |
569 | trace_probe_log_set_index(2); | |
570 | trace_probe_log_err(0, TOO_MANY_ARGS); | |
73f35080 | 571 | return -E2BIG; |
57faaa04 | 572 | } |
f3f096cf | 573 | |
0597c49c | 574 | if (argv[0][1] == ':') |
f3f096cf | 575 | event = &argv[0][2]; |
f3f096cf | 576 | |
0597c49c MH |
577 | if (!strchr(argv[1], '/')) |
578 | return -ECANCELED; | |
f3f096cf | 579 | |
0597c49c MH |
580 | filename = kstrdup(argv[1], GFP_KERNEL); |
581 | if (!filename) | |
582 | return -ENOMEM; | |
f3f096cf | 583 | |
6496bb72 | 584 | /* Find the last occurrence, in case the path contains ':' too. */ |
0597c49c MH |
585 | arg = strrchr(filename, ':'); |
586 | if (!arg || !isdigit(arg[1])) { | |
587 | kfree(filename); | |
588 | return -ECANCELED; | |
589 | } | |
f3f096cf | 590 | |
ab105a4f MH |
591 | trace_probe_log_set_index(1); /* filename is the 2nd argument */ |
592 | ||
f3f096cf | 593 | *arg++ = '\0'; |
f3f096cf | 594 | ret = kern_path(filename, LOOKUP_FOLLOW, &path); |
0597c49c | 595 | if (ret) { |
ab105a4f | 596 | trace_probe_log_err(0, FILE_NOT_FOUND); |
0597c49c | 597 | kfree(filename); |
ab105a4f | 598 | trace_probe_log_clear(); |
0c92c7a3 | 599 | return ret; |
0597c49c | 600 | } |
0c92c7a3 | 601 | if (!d_is_reg(path.dentry)) { |
ab105a4f | 602 | trace_probe_log_err(0, NO_REGULAR_FILE); |
d24d7dbf JZ |
603 | ret = -EINVAL; |
604 | goto fail_address_parse; | |
605 | } | |
f3f096cf | 606 | |
1cc33161 RB |
607 | /* Parse reference counter offset if specified. */ |
608 | rctr = strchr(arg, '('); | |
609 | if (rctr) { | |
610 | rctr_end = strchr(rctr, ')'); | |
ab105a4f MH |
611 | if (!rctr_end) { |
612 | ret = -EINVAL; | |
613 | rctr_end = rctr + strlen(rctr); | |
614 | trace_probe_log_err(rctr_end - filename, | |
615 | REFCNT_OPEN_BRACE); | |
616 | goto fail_address_parse; | |
617 | } else if (rctr_end[1] != '\0') { | |
1cc33161 | 618 | ret = -EINVAL; |
ab105a4f MH |
619 | trace_probe_log_err(rctr_end + 1 - filename, |
620 | BAD_REFCNT_SUFFIX); | |
1cc33161 RB |
621 | goto fail_address_parse; |
622 | } | |
623 | ||
624 | *rctr++ = '\0'; | |
625 | *rctr_end = '\0'; | |
626 | ret = kstrtoul(rctr, 0, &ref_ctr_offset); | |
627 | if (ret) { | |
ab105a4f | 628 | trace_probe_log_err(rctr - filename, BAD_REFCNT); |
1cc33161 RB |
629 | goto fail_address_parse; |
630 | } | |
631 | } | |
632 | ||
3dd3aae3 MH |
633 | /* Check if there is %return suffix */ |
634 | tmp = strchr(arg, '%'); | |
635 | if (tmp) { | |
636 | if (!strcmp(tmp, "%return")) { | |
637 | *tmp = '\0'; | |
638 | is_return = true; | |
639 | } else { | |
640 | trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX); | |
641 | ret = -EINVAL; | |
642 | goto fail_address_parse; | |
643 | } | |
644 | } | |
645 | ||
1cc33161 | 646 | /* Parse uprobe offset. */ |
84d7ed79 | 647 | ret = kstrtoul(arg, 0, &offset); |
ab105a4f MH |
648 | if (ret) { |
649 | trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS); | |
84d7ed79 | 650 | goto fail_address_parse; |
ab105a4f | 651 | } |
f3f096cf SD |
652 | |
653 | /* setup a probe */ | |
ab105a4f | 654 | trace_probe_log_set_index(0); |
0597c49c | 655 | if (event) { |
95c104c3 | 656 | ret = traceprobe_parse_event_name(&event, &group, gbuf, |
ab105a4f | 657 | event - argv[0]); |
0597c49c MH |
658 | if (ret) |
659 | goto fail_address_parse; | |
95c104c3 LY |
660 | } |
661 | ||
662 | if (!event) { | |
b2e902f0 | 663 | char *tail; |
f3f096cf SD |
664 | char *ptr; |
665 | ||
b2e902f0 AS |
666 | tail = kstrdup(kbasename(filename), GFP_KERNEL); |
667 | if (!tail) { | |
f3f096cf SD |
668 | ret = -ENOMEM; |
669 | goto fail_address_parse; | |
670 | } | |
671 | ||
f3f096cf SD |
672 | ptr = strpbrk(tail, ".-_"); |
673 | if (ptr) | |
674 | *ptr = '\0'; | |
675 | ||
676 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset); | |
677 | event = buf; | |
678 | kfree(tail); | |
679 | } | |
680 | ||
ab105a4f MH |
681 | argc -= 2; |
682 | argv += 2; | |
683 | ||
4ee5a52e | 684 | tu = alloc_trace_uprobe(group, event, argc, is_return); |
f3f096cf | 685 | if (IS_ERR(tu)) { |
f3f096cf | 686 | ret = PTR_ERR(tu); |
a039480e MH |
687 | /* This must return -ENOMEM otherwise there is a bug */ |
688 | WARN_ON_ONCE(ret != -ENOMEM); | |
f3f096cf SD |
689 | goto fail_address_parse; |
690 | } | |
691 | tu->offset = offset; | |
1cc33161 | 692 | tu->ref_ctr_offset = ref_ctr_offset; |
0c92c7a3 | 693 | tu->path = path; |
0597c49c | 694 | tu->filename = filename; |
f3f096cf SD |
695 | |
696 | /* parse arguments */ | |
73f35080 | 697 | for (i = 0; i < argc; i++) { |
1b8b0cd7 MHG |
698 | struct traceprobe_parse_context ctx = { |
699 | .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER, | |
700 | }; | |
701 | ||
ab105a4f | 702 | trace_probe_log_set_index(i + 2); |
1b8b0cd7 | 703 | ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx); |
b1d1e904 | 704 | traceprobe_finish_parse(&ctx); |
d00bbea9 | 705 | if (ret) |
f3f096cf | 706 | goto error; |
f3f096cf SD |
707 | } |
708 | ||
007517a0 SRV |
709 | ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; |
710 | ret = traceprobe_set_print_fmt(&tu->tp, ptype); | |
b4d4b96b MH |
711 | if (ret < 0) |
712 | goto error; | |
713 | ||
f3f096cf | 714 | ret = register_trace_uprobe(tu); |
ab105a4f MH |
715 | if (!ret) |
716 | goto out; | |
f3f096cf SD |
717 | |
718 | error: | |
719 | free_trace_uprobe(tu); | |
ab105a4f MH |
720 | out: |
721 | trace_probe_log_clear(); | |
f3f096cf SD |
722 | return ret; |
723 | ||
724 | fail_address_parse: | |
ab105a4f | 725 | trace_probe_log_clear(); |
0c92c7a3 | 726 | path_put(&path); |
0597c49c | 727 | kfree(filename); |
f3f096cf | 728 | |
f3f096cf SD |
729 | return ret; |
730 | } | |
731 | ||
d262271d MH |
732 | int trace_uprobe_create(const char *raw_command) |
733 | { | |
734 | return trace_probe_create(raw_command, __trace_uprobe_create); | |
735 | } | |
736 | ||
737 | static int create_or_delete_trace_uprobe(const char *raw_command) | |
f3f096cf | 738 | { |
0597c49c | 739 | int ret; |
f3f096cf | 740 | |
d262271d MH |
741 | if (raw_command[0] == '-') |
742 | return dyn_event_release(raw_command, &trace_uprobe_ops); | |
f3f096cf | 743 | |
fd837de3 | 744 | ret = dyn_event_create(raw_command, &trace_uprobe_ops); |
0597c49c | 745 | return ret == -ECANCELED ? -EINVAL : ret; |
f3f096cf SD |
746 | } |
747 | ||
0597c49c | 748 | static int trace_uprobe_release(struct dyn_event *ev) |
f3f096cf | 749 | { |
0597c49c | 750 | struct trace_uprobe *tu = to_trace_uprobe(ev); |
f3f096cf | 751 | |
0597c49c | 752 | return unregister_trace_uprobe(tu); |
f3f096cf SD |
753 | } |
754 | ||
0597c49c MH |
755 | /* Probes listing interfaces */ |
756 | static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev) | |
f3f096cf | 757 | { |
0597c49c | 758 | struct trace_uprobe *tu = to_trace_uprobe(ev); |
3ede82dd | 759 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
f3f096cf SD |
760 | int i; |
761 | ||
b55ce203 MH |
762 | seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp), |
763 | trace_probe_name(&tu->tp), tu->filename, | |
a64b2c01 | 764 | (int)(sizeof(void *) * 2), tu->offset); |
f3f096cf | 765 | |
1cc33161 RB |
766 | if (tu->ref_ctr_offset) |
767 | seq_printf(m, "(0x%lx)", tu->ref_ctr_offset); | |
768 | ||
14577c39 NK |
769 | for (i = 0; i < tu->tp.nr_args; i++) |
770 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); | |
f3f096cf | 771 | |
fa6f0cc7 | 772 | seq_putc(m, '\n'); |
f3f096cf SD |
773 | return 0; |
774 | } | |
775 | ||
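Given the format strings above, one line of the resulting listing looks roughly like this (hypothetical event, 64-bit zero padding):

```c
/*
 *   p:uprobes/my_open /usr/bin/bash:0x00000000004245c0 fd=%di
 *   r:uprobes/my_open_ret /usr/bin/bash:0x00000000004245c0(0x9f38) ret=$retval
 */
```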
0597c49c MH |
776 | static int probes_seq_show(struct seq_file *m, void *v) |
777 | { | |
778 | struct dyn_event *ev = v; | |
779 | ||
780 | if (!is_trace_uprobe(ev)) | |
781 | return 0; | |
782 | ||
783 | return trace_uprobe_show(m, ev); | |
784 | } | |
785 | ||
f3f096cf | 786 | static const struct seq_operations probes_seq_op = { |
0597c49c MH |
787 | .start = dyn_event_seq_start, |
788 | .next = dyn_event_seq_next, | |
789 | .stop = dyn_event_seq_stop, | |
790 | .show = probes_seq_show | |
f3f096cf SD |
791 | }; |
792 | ||
793 | static int probes_open(struct inode *inode, struct file *file) | |
794 | { | |
c6c2401d SRRH |
795 | int ret; |
796 | ||
17911ff3 SRV |
797 | ret = security_locked_down(LOCKDOWN_TRACEFS); |
798 | if (ret) | |
799 | return ret; | |
800 | ||
c6c2401d | 801 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
0597c49c | 802 | ret = dyn_events_release_all(&trace_uprobe_ops); |
c6c2401d SRRH |
803 | if (ret) |
804 | return ret; | |
805 | } | |
f3f096cf SD |
806 | |
807 | return seq_open(file, &probes_seq_op); | |
808 | } | |
809 | ||
810 | static ssize_t probes_write(struct file *file, const char __user *buffer, | |
811 | size_t count, loff_t *ppos) | |
812 | { | |
0597c49c MH |
813 | return trace_parse_run_command(file, buffer, count, ppos, |
814 | create_or_delete_trace_uprobe); | |
f3f096cf SD |
815 | } |
816 | ||
817 | static const struct file_operations uprobe_events_ops = { | |
818 | .owner = THIS_MODULE, | |
819 | .open = probes_open, | |
820 | .read = seq_read, | |
821 | .llseek = seq_lseek, | |
822 | .release = seq_release, | |
823 | .write = probes_write, | |
824 | }; | |
825 | ||
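A minimal userspace sketch of driving this interface: it appends one probe definition to the uprobe_events file that probes_write() backs. The tracefs mount point, binary path, offset and register name are assumptions.

```c
/* Sketch: register a uprobe event from userspace (paths and values assumed). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *ctrl = "/sys/kernel/tracing/uprobe_events";
	const char *def  = "p:uprobes/my_open /usr/bin/bash:0x4245c0 fd=%di\n";
	int fd = open(ctrl, O_WRONLY | O_APPEND);

	if (fd < 0) {
		perror("open uprobe_events");
		return 1;
	}
	if (write(fd, def, strlen(def)) < 0)
		perror("write probe definition");
	close(fd);

	/*
	 * The event would then be enabled through
	 * /sys/kernel/tracing/events/uprobes/my_open/enable (not shown).
	 */
	return 0;
}
```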
826 | /* Probes profiling interfaces */ | |
827 | static int probes_profile_seq_show(struct seq_file *m, void *v) | |
828 | { | |
0597c49c MH |
829 | struct dyn_event *ev = v; |
830 | struct trace_uprobe *tu; | |
10cdb82a AN |
831 | unsigned long nhits; |
832 | int cpu; | |
0597c49c MH |
833 | |
834 | if (!is_trace_uprobe(ev)) | |
835 | return 0; | |
f3f096cf | 836 | |
0597c49c | 837 | tu = to_trace_uprobe(ev); |
10cdb82a AN |
838 | |
839 | nhits = 0; | |
840 | for_each_possible_cpu(cpu) { | |
841 | nhits += per_cpu(*tu->nhits, cpu); | |
842 | } | |
843 | ||
de7b2973 | 844 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, |
10cdb82a | 845 | trace_probe_name(&tu->tp), nhits); |
f3f096cf SD |
846 | return 0; |
847 | } | |
848 | ||
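Per the " %s %-44s %15lu" format above, a uprobe_profile line comes out roughly as follows (hypothetical hit count, spacing approximate):

```c
/*
 *   /usr/bin/bash my_open                                             1278
 */
```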
849 | static const struct seq_operations profile_seq_op = { | |
0597c49c MH |
850 | .start = dyn_event_seq_start, |
851 | .next = dyn_event_seq_next, | |
852 | .stop = dyn_event_seq_stop, | |
f3f096cf SD |
853 | .show = probes_profile_seq_show |
854 | }; | |
855 | ||
856 | static int profile_open(struct inode *inode, struct file *file) | |
857 | { | |
17911ff3 SRV |
858 | int ret; |
859 | ||
860 | ret = security_locked_down(LOCKDOWN_TRACEFS); | |
861 | if (ret) | |
862 | return ret; | |
863 | ||
f3f096cf SD |
864 | return seq_open(file, &profile_seq_op); |
865 | } | |
866 | ||
867 | static const struct file_operations uprobe_profile_ops = { | |
868 | .owner = THIS_MODULE, | |
869 | .open = profile_open, | |
870 | .read = seq_read, | |
871 | .llseek = seq_lseek, | |
872 | .release = seq_release, | |
873 | }; | |
874 | ||
dcad1a20 NK |
875 | struct uprobe_cpu_buffer { |
876 | struct mutex mutex; | |
877 | void *buf; | |
3eaea21b | 878 | int dsize; |
dcad1a20 NK |
879 | }; |
880 | static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer; | |
881 | static int uprobe_buffer_refcnt; | |
373b9338 | 882 | #define MAX_UCB_BUFFER_SIZE PAGE_SIZE |
dcad1a20 NK |
883 | |
884 | static int uprobe_buffer_init(void) | |
885 | { | |
886 | int cpu, err_cpu; | |
887 | ||
888 | uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer); | |
889 | if (uprobe_cpu_buffer == NULL) | |
890 | return -ENOMEM; | |
891 | ||
892 | for_each_possible_cpu(cpu) { | |
893 | struct page *p = alloc_pages_node(cpu_to_node(cpu), | |
894 | GFP_KERNEL, 0); | |
895 | if (p == NULL) { | |
896 | err_cpu = cpu; | |
897 | goto err; | |
898 | } | |
899 | per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); | |
900 | mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); | |
901 | } | |
902 | ||
903 | return 0; | |
904 | ||
905 | err: | |
906 | for_each_possible_cpu(cpu) { | |
907 | if (cpu == err_cpu) | |
908 | break; | |
909 | free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); | |
910 | } | |
911 | ||
912 | free_percpu(uprobe_cpu_buffer); | |
913 | return -ENOMEM; | |
914 | } | |
915 | ||
916 | static int uprobe_buffer_enable(void) | |
917 | { | |
918 | int ret = 0; | |
919 | ||
920 | BUG_ON(!mutex_is_locked(&event_mutex)); | |
921 | ||
922 | if (uprobe_buffer_refcnt++ == 0) { | |
923 | ret = uprobe_buffer_init(); | |
924 | if (ret < 0) | |
925 | uprobe_buffer_refcnt--; | |
926 | } | |
927 | ||
928 | return ret; | |
929 | } | |
930 | ||
931 | static void uprobe_buffer_disable(void) | |
932 | { | |
6ea6215f J |
933 | int cpu; |
934 | ||
dcad1a20 NK |
935 | BUG_ON(!mutex_is_locked(&event_mutex)); |
936 | ||
937 | if (--uprobe_buffer_refcnt == 0) { | |
6ea6215f J |
938 | for_each_possible_cpu(cpu) |
939 | free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, | |
940 | cpu)->buf); | |
941 | ||
dcad1a20 NK |
942 | free_percpu(uprobe_cpu_buffer); |
943 | uprobe_cpu_buffer = NULL; | |
944 | } | |
945 | } | |
946 | ||
947 | static struct uprobe_cpu_buffer *uprobe_buffer_get(void) | |
948 | { | |
949 | struct uprobe_cpu_buffer *ucb; | |
950 | int cpu; | |
951 | ||
952 | cpu = raw_smp_processor_id(); | |
953 | ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); | |
954 | ||
955 | /* | |
956 | * Use per-cpu buffers for fastest access, but we might migrate | |
957 | * so the mutex makes sure we have sole access to it. | |
958 | */ | |
959 | mutex_lock(&ucb->mutex); | |
960 | ||
961 | return ucb; | |
962 | } | |
963 | ||
964 | static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | |
965 | { | |
1b8f85de AN |
966 | if (!ucb) |
967 | return; | |
dcad1a20 NK |
968 | mutex_unlock(&ucb->mutex); |
969 | } | |
970 | ||
3eaea21b | 971 | static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu, |
1b8f85de AN |
972 | struct pt_regs *regs, |
973 | struct uprobe_cpu_buffer **ucbp) | |
3eaea21b AN |
974 | { |
975 | struct uprobe_cpu_buffer *ucb; | |
976 | int dsize, esize; | |
977 | ||
1b8f85de AN |
978 | if (*ucbp) |
979 | return *ucbp; | |
980 | ||
3eaea21b AN |
981 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
982 | dsize = __get_data_size(&tu->tp, regs, NULL); | |
983 | ||
984 | ucb = uprobe_buffer_get(); | |
985 | ucb->dsize = tu->tp.size + dsize; | |
986 | ||
373b9338 QM |
987 | if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) { |
988 | ucb->dsize = MAX_UCB_BUFFER_SIZE; | |
989 | dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size; | |
990 | } | |
991 | ||
3eaea21b AN |
992 | store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize); |
993 | ||
1b8f85de | 994 | *ucbp = ucb; |
3eaea21b AN |
995 | return ucb; |
996 | } | |
997 | ||
a43b9704 | 998 | static void __uprobe_trace_func(struct trace_uprobe *tu, |
dd9fa555 | 999 | unsigned long func, struct pt_regs *regs, |
69964673 | 1000 | struct uprobe_cpu_buffer *ucb, |
7f1d2f82 | 1001 | struct trace_event_file *trace_file) |
f3f096cf SD |
1002 | { |
1003 | struct uprobe_trace_entry_head *entry; | |
b7d5eb26 | 1004 | struct trace_event_buffer fbuffer; |
457d1772 | 1005 | void *data; |
dd9fa555 | 1006 | int size, esize; |
e3dc9f89 | 1007 | struct trace_event_call *call = trace_probe_event_call(&tu->tp); |
f3f096cf | 1008 | |
7f1d2f82 | 1009 | WARN_ON(call != trace_file->event_call); |
70ed91c6 | 1010 | |
09a5059a | 1011 | if (trace_trigger_soft_disabled(trace_file)) |
ca3b1620 NK |
1012 | return; |
1013 | ||
dd9fa555 | 1014 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
3eaea21b | 1015 | size = esize + ucb->dsize; |
b7d5eb26 SRV |
1016 | entry = trace_event_buffer_reserve(&fbuffer, trace_file, size); |
1017 | if (!entry) | |
dd9fa555 | 1018 | return; |
f3f096cf | 1019 | |
393a736c ON |
1020 | if (is_ret_probe(tu)) { |
1021 | entry->vaddr[0] = func; | |
1022 | entry->vaddr[1] = instruction_pointer(regs); | |
1023 | data = DATAOF_TRACE_ENTRY(entry, true); | |
1024 | } else { | |
1025 | entry->vaddr[0] = instruction_pointer(regs); | |
1026 | data = DATAOF_TRACE_ENTRY(entry, false); | |
1027 | } | |
1028 | ||
3eaea21b | 1029 | memcpy(data, ucb->buf, ucb->dsize); |
f3f096cf | 1030 | |
b7d5eb26 | 1031 | trace_event_buffer_commit(&fbuffer); |
a51cc604 | 1032 | } |
f42d24a1 | 1033 | |
a51cc604 | 1034 | /* uprobe handler */ |
dd9fa555 | 1035 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, |
1b8f85de | 1036 | struct uprobe_cpu_buffer **ucbp) |
a51cc604 | 1037 | { |
70ed91c6 | 1038 | struct event_file_link *link; |
69964673 | 1039 | struct uprobe_cpu_buffer *ucb; |
70ed91c6 J |
1040 | |
1041 | if (is_ret_probe(tu)) | |
1042 | return 0; | |
1043 | ||
69964673 AN |
1044 | ucb = prepare_uprobe_buffer(tu, regs, ucbp); |
1045 | ||
70ed91c6 | 1046 | rcu_read_lock(); |
b5f935ee | 1047 | trace_probe_for_each_link_rcu(link, &tu->tp) |
69964673 | 1048 | __uprobe_trace_func(tu, 0, regs, ucb, link->file); |
70ed91c6 J |
1049 | rcu_read_unlock(); |
1050 | ||
f42d24a1 | 1051 | return 0; |
f3f096cf SD |
1052 | } |
1053 | ||
c1ae5c75 | 1054 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, |
dd9fa555 | 1055 | struct pt_regs *regs, |
1b8f85de | 1056 | struct uprobe_cpu_buffer **ucbp) |
c1ae5c75 | 1057 | { |
70ed91c6 | 1058 | struct event_file_link *link; |
69964673 AN |
1059 | struct uprobe_cpu_buffer *ucb; |
1060 | ||
1061 | ucb = prepare_uprobe_buffer(tu, regs, ucbp); | |
70ed91c6 J |
1062 | |
1063 | rcu_read_lock(); | |
b5f935ee | 1064 | trace_probe_for_each_link_rcu(link, &tu->tp) |
69964673 | 1065 | __uprobe_trace_func(tu, func, regs, ucb, link->file); |
70ed91c6 | 1066 | rcu_read_unlock(); |
c1ae5c75 ON |
1067 | } |
1068 | ||
f3f096cf SD |
1069 | /* Event entry printers */ |
1070 | static enum print_line_t | |
1071 | print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) | |
1072 | { | |
457d1772 | 1073 | struct uprobe_trace_entry_head *entry; |
f3f096cf SD |
1074 | struct trace_seq *s = &iter->seq; |
1075 | struct trace_uprobe *tu; | |
1076 | u8 *data; | |
f3f096cf | 1077 | |
457d1772 | 1078 | entry = (struct uprobe_trace_entry_head *)iter->ent; |
60d53e2c MH |
1079 | tu = trace_uprobe_primary_from_call( |
1080 | container_of(event, struct trace_event_call, event)); | |
1081 | if (unlikely(!tu)) | |
1082 | goto out; | |
f3f096cf | 1083 | |
3ede82dd | 1084 | if (is_ret_probe(tu)) { |
8579a107 | 1085 | trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
b55ce203 | 1086 | trace_probe_name(&tu->tp), |
8579a107 | 1087 | entry->vaddr[1], entry->vaddr[0]); |
3ede82dd ON |
1088 | data = DATAOF_TRACE_ENTRY(entry, true); |
1089 | } else { | |
8579a107 | 1090 | trace_seq_printf(s, "%s: (0x%lx)", |
b55ce203 | 1091 | trace_probe_name(&tu->tp), |
8579a107 | 1092 | entry->vaddr[0]); |
3ede82dd ON |
1093 | data = DATAOF_TRACE_ENTRY(entry, false); |
1094 | } | |
f3f096cf | 1095 | |
196b6389 | 1096 | if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0) |
56de7630 | 1097 | goto out; |
f3f096cf | 1098 | |
8579a107 | 1099 | trace_seq_putc(s, '\n'); |
f3f096cf | 1100 | |
8579a107 SRRH |
1101 | out: |
1102 | return trace_handle_return(s); | |
f3f096cf SD |
1103 | } |
1104 | ||
59da880a | 1105 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm); |
31ba3348 | 1106 | |
60d53e2c | 1107 | static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter) |
f3f096cf | 1108 | { |
3c83a9ad ON |
1109 | struct inode *inode = d_real_inode(tu->path.dentry); |
1110 | struct uprobe *uprobe; | |
70ed91c6 | 1111 | |
60d53e2c | 1112 | tu->consumer.filter = filter; |
3c83a9ad ON |
1113 | uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer); |
1114 | if (IS_ERR(uprobe)) | |
1115 | return PTR_ERR(uprobe); | |
60d53e2c | 1116 | |
3c83a9ad ON |
1117 | tu->uprobe = uprobe; |
1118 | return 0; | |
60d53e2c MH |
1119 | } |
1120 | ||
1121 | static void __probe_event_disable(struct trace_probe *tp) | |
1122 | { | |
60d53e2c | 1123 | struct trace_uprobe *tu; |
04b01625 | 1124 | bool sync = false; |
60d53e2c | 1125 | |
99c9a923 | 1126 | tu = container_of(tp, struct trace_uprobe, tp); |
b61387cb | 1127 | WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); |
99c9a923 | 1128 | |
e161c6bf | 1129 | list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { |
3c83a9ad | 1130 | if (!tu->uprobe) |
60d53e2c MH |
1131 | continue; |
1132 | ||
04b01625 PZ |
1133 | uprobe_unregister_nosync(tu->uprobe, &tu->consumer); |
1134 | sync = true; | |
3c83a9ad | 1135 | tu->uprobe = NULL; |
60d53e2c | 1136 | } |
04b01625 PZ |
1137 | if (sync) |
1138 | uprobe_unregister_sync(); | |
60d53e2c MH |
1139 | } |
1140 | ||
1141 | static int probe_event_enable(struct trace_event_call *call, | |
1142 | struct trace_event_file *file, filter_func_t filter) | |
1143 | { | |
e161c6bf | 1144 | struct trace_probe *tp; |
60d53e2c MH |
1145 | struct trace_uprobe *tu; |
1146 | bool enabled; | |
1147 | int ret; | |
1148 | ||
1149 | tp = trace_probe_primary_from_call(call); | |
1150 | if (WARN_ON_ONCE(!tp)) | |
1151 | return -ENODEV; | |
1152 | enabled = trace_probe_is_enabled(tp); | |
1153 | ||
1154 | /* This may also change "enabled" state */ | |
70ed91c6 | 1155 | if (file) { |
60d53e2c | 1156 | if (trace_probe_test_flag(tp, TP_FLAG_PROFILE)) |
48212542 ON |
1157 | return -EINTR; |
1158 | ||
60d53e2c | 1159 | ret = trace_probe_add_file(tp, file); |
b5f935ee MH |
1160 | if (ret < 0) |
1161 | return ret; | |
48212542 | 1162 | } else { |
60d53e2c | 1163 | if (trace_probe_test_flag(tp, TP_FLAG_TRACE)) |
48212542 ON |
1164 | return -EINTR; |
1165 | ||
60d53e2c | 1166 | trace_probe_set_flag(tp, TP_FLAG_PROFILE); |
48212542 | 1167 | } |
f3f096cf | 1168 | |
60d53e2c | 1169 | tu = container_of(tp, struct trace_uprobe, tp); |
b61387cb | 1170 | WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter)); |
736288ba | 1171 | |
70ed91c6 J |
1172 | if (enabled) |
1173 | return 0; | |
1174 | ||
fb6bab6a ON |
1175 | ret = uprobe_buffer_enable(); |
1176 | if (ret) | |
1177 | goto err_flags; | |
1178 | ||
e161c6bf | 1179 | list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { |
60d53e2c MH |
1180 | ret = trace_uprobe_enable(tu, filter); |
1181 | if (ret) { | |
1182 | __probe_event_disable(tp); | |
1183 | goto err_buffer; | |
1184 | } | |
1cc33161 RB |
1185 | } |
1186 | ||
fb6bab6a ON |
1187 | return 0; |
1188 | ||
1189 | err_buffer: | |
1190 | uprobe_buffer_disable(); | |
f3f096cf | 1191 | |
fb6bab6a | 1192 | err_flags: |
b5f935ee | 1193 | if (file) |
60d53e2c | 1194 | trace_probe_remove_file(tp, file); |
b5f935ee | 1195 | else |
60d53e2c | 1196 | trace_probe_clear_flag(tp, TP_FLAG_PROFILE); |
b5f935ee | 1197 | |
4161824f | 1198 | return ret; |
f3f096cf SD |
1199 | } |
1200 | ||
60d53e2c MH |
1201 | static void probe_event_disable(struct trace_event_call *call, |
1202 | struct trace_event_file *file) | |
f3f096cf | 1203 | { |
60d53e2c MH |
1204 | struct trace_probe *tp; |
1205 | ||
1206 | tp = trace_probe_primary_from_call(call); | |
1207 | if (WARN_ON_ONCE(!tp)) | |
1208 | return; | |
1209 | ||
1210 | if (!trace_probe_is_enabled(tp)) | |
f3f096cf SD |
1211 | return; |
1212 | ||
70ed91c6 | 1213 | if (file) { |
60d53e2c | 1214 | if (trace_probe_remove_file(tp, file) < 0) |
70ed91c6 J |
1215 | return; |
1216 | ||
60d53e2c | 1217 | if (trace_probe_is_enabled(tp)) |
70ed91c6 | 1218 | return; |
b5f935ee | 1219 | } else |
60d53e2c | 1220 | trace_probe_clear_flag(tp, TP_FLAG_PROFILE); |
dcad1a20 | 1221 | |
60d53e2c | 1222 | __probe_event_disable(tp); |
dcad1a20 | 1223 | uprobe_buffer_disable(); |
f3f096cf SD |
1224 | } |
1225 | ||
2425bcb9 | 1226 | static int uprobe_event_define_fields(struct trace_event_call *event_call) |
f3f096cf | 1227 | { |
eeb07b06 | 1228 | int ret, size; |
f3f096cf | 1229 | struct uprobe_trace_entry_head field; |
60d53e2c MH |
1230 | struct trace_uprobe *tu; |
1231 | ||
1232 | tu = trace_uprobe_primary_from_call(event_call); | |
1233 | if (unlikely(!tu)) | |
1234 | return -ENODEV; | |
f3f096cf | 1235 | |
4d1298e2 ON |
1236 | if (is_ret_probe(tu)) { |
1237 | DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0); | |
1238 | DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0); | |
1239 | size = SIZEOF_TRACE_ENTRY(true); | |
1240 | } else { | |
1241 | DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0); | |
1242 | size = SIZEOF_TRACE_ENTRY(false); | |
1243 | } | |
f3f096cf | 1244 | |
eeb07b06 | 1245 | return traceprobe_define_arg_fields(event_call, size, &tu->tp); |
f3f096cf SD |
1246 | } |
1247 | ||
f3f096cf | 1248 | #ifdef CONFIG_PERF_EVENTS |
31ba3348 ON |
1249 | static bool |
1250 | __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) | |
1251 | { | |
1252 | struct perf_event *event; | |
1253 | ||
31ba3348 | 1254 | list_for_each_entry(event, &filter->perf_events, hw.tp_list) { |
50f16a8b | 1255 | if (event->hw.target->mm == mm) |
31ba3348 ON |
1256 | return true; |
1257 | } | |
1258 | ||
1259 | return false; | |
1260 | } | |
1261 | ||
b2fe8ba6 | 1262 | static inline bool |
99c9a923 MH |
1263 | trace_uprobe_filter_event(struct trace_uprobe_filter *filter, |
1264 | struct perf_event *event) | |
b2fe8ba6 | 1265 | { |
99c9a923 | 1266 | return __uprobe_perf_filter(filter, event->hw.target->mm); |
b2fe8ba6 ON |
1267 | } |
1268 | ||
99c9a923 MH |
1269 | static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter, |
1270 | struct perf_event *event) | |
736288ba | 1271 | { |
b2fe8ba6 ON |
1272 | bool done; |
1273 | ||
99c9a923 | 1274 | write_lock(&filter->rwlock); |
50f16a8b | 1275 | if (event->hw.target) { |
ce5f36a5 | 1276 | list_del(&event->hw.tp_list); |
99c9a923 | 1277 | done = filter->nr_systemwide || |
50f16a8b | 1278 | (event->hw.target->flags & PF_EXITING) || |
99c9a923 | 1279 | trace_uprobe_filter_event(filter, event); |
b2fe8ba6 | 1280 | } else { |
99c9a923 MH |
1281 | filter->nr_systemwide--; |
1282 | done = filter->nr_systemwide; | |
b2fe8ba6 | 1283 | } |
99c9a923 | 1284 | write_unlock(&filter->rwlock); |
31ba3348 | 1285 | |
99c9a923 | 1286 | return done; |
736288ba ON |
1287 | } |
1288 | ||
99c9a923 MH |
1289 | /* This returns true if the filter always covers target mm */ |
1290 | static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter, | |
1291 | struct perf_event *event) | |
736288ba | 1292 | { |
b2fe8ba6 ON |
1293 | bool done; |
1294 | ||
99c9a923 | 1295 | write_lock(&filter->rwlock); |
50f16a8b | 1296 | if (event->hw.target) { |
ce5f36a5 ON |
1297 | /* |
1298 | * event->parent != NULL means copy_process(), we can avoid | |
1299 | * uprobe_apply(). current->mm must be probed and we can rely | |
1300 | * on dup_mmap() which preserves the already installed bp's. | |
1301 | * | |
1302 | * attr.enable_on_exec means that exec/mmap will install the | |
1303 | * breakpoints we need. | |
1304 | */ | |
99c9a923 | 1305 | done = filter->nr_systemwide || |
ce5f36a5 | 1306 | event->parent || event->attr.enable_on_exec || |
99c9a923 MH |
1307 | trace_uprobe_filter_event(filter, event); |
1308 | list_add(&event->hw.tp_list, &filter->perf_events); | |
b2fe8ba6 | 1309 | } else { |
99c9a923 MH |
1310 | done = filter->nr_systemwide; |
1311 | filter->nr_systemwide++; | |
b2fe8ba6 | 1312 | } |
99c9a923 | 1313 | write_unlock(&filter->rwlock); |
736288ba | 1314 | |
99c9a923 | 1315 | return done; |
736288ba ON |
1316 | } |
1317 | ||
99c9a923 MH |
1318 | static int uprobe_perf_close(struct trace_event_call *call, |
1319 | struct perf_event *event) | |
60d53e2c | 1320 | { |
e161c6bf | 1321 | struct trace_probe *tp; |
60d53e2c MH |
1322 | struct trace_uprobe *tu; |
1323 | int ret = 0; | |
1324 | ||
1325 | tp = trace_probe_primary_from_call(call); | |
1326 | if (WARN_ON_ONCE(!tp)) | |
1327 | return -ENODEV; | |
1328 | ||
99c9a923 | 1329 | tu = container_of(tp, struct trace_uprobe, tp); |
b61387cb | 1330 | if (trace_uprobe_filter_remove(tu->tp.event->filter, event)) |
99c9a923 MH |
1331 | return 0; |
1332 | ||
e161c6bf | 1333 | list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { |
3c83a9ad | 1334 | ret = uprobe_apply(tu->uprobe, &tu->consumer, false); |
60d53e2c MH |
1335 | if (ret) |
1336 | break; | |
1337 | } | |
1338 | ||
1339 | return ret; | |
1340 | } | |
99c9a923 MH |
1341 | |
1342 | static int uprobe_perf_open(struct trace_event_call *call, | |
1343 | struct perf_event *event) | |
1344 | { | |
e161c6bf | 1345 | struct trace_probe *tp; |
99c9a923 MH |
1346 | struct trace_uprobe *tu; |
1347 | int err = 0; | |
1348 | ||
1349 | tp = trace_probe_primary_from_call(call); | |
1350 | if (WARN_ON_ONCE(!tp)) | |
1351 | return -ENODEV; | |
1352 | ||
1353 | tu = container_of(tp, struct trace_uprobe, tp); | |
b61387cb | 1354 | if (trace_uprobe_filter_add(tu->tp.event->filter, event)) |
99c9a923 MH |
1355 | return 0; |
1356 | ||
e161c6bf | 1357 | list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) { |
3c83a9ad | 1358 | err = uprobe_apply(tu->uprobe, &tu->consumer, true); |
99c9a923 MH |
1359 | if (err) { |
1360 | uprobe_perf_close(call, event); | |
1361 | break; | |
1362 | } | |
1363 | } | |
1364 | ||
1365 | return err; | |
1366 | } | |
1367 | ||
59da880a | 1368 | static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm) |
31ba3348 | 1369 | { |
99c9a923 | 1370 | struct trace_uprobe_filter *filter; |
31ba3348 ON |
1371 | struct trace_uprobe *tu; |
1372 | int ret; | |
1373 | ||
1374 | tu = container_of(uc, struct trace_uprobe, consumer); | |
b61387cb | 1375 | filter = tu->tp.event->filter; |
99c9a923 | 1376 | |
cdf355cc AN |
1377 | /* |
1378 | * A speculative short-circuit check to avoid unnecessarily taking | |
1379 | * filter->rwlock below, if the uprobe has a system-wide consumer. | |
1380 | */ | |
1381 | if (READ_ONCE(filter->nr_systemwide)) | |
1382 | return true; | |
1383 | ||
99c9a923 MH |
1384 | read_lock(&filter->rwlock); |
1385 | ret = __uprobe_perf_filter(filter, mm); | |
1386 | read_unlock(&filter->rwlock); | |
31ba3348 ON |
1387 | |
1388 | return ret; | |
1389 | } | |
1390 | ||
a43b9704 | 1391 | static void __uprobe_perf_func(struct trace_uprobe *tu, |
dd9fa555 | 1392 | unsigned long func, struct pt_regs *regs, |
1b8f85de | 1393 | struct uprobe_cpu_buffer **ucbp) |
f3f096cf | 1394 | { |
e3dc9f89 | 1395 | struct trace_event_call *call = trace_probe_event_call(&tu->tp); |
f3f096cf | 1396 | struct uprobe_trace_entry_head *entry; |
1b8f85de | 1397 | struct uprobe_cpu_buffer *ucb; |
f3f096cf | 1398 | struct hlist_head *head; |
457d1772 | 1399 | void *data; |
dd9fa555 | 1400 | int size, esize; |
dcad1a20 NK |
1401 | int rctx; |
1402 | ||
aca80dd9 | 1403 | #ifdef CONFIG_BPF_EVENTS |
70ed0706 | 1404 | if (bpf_prog_array_valid(call)) { |
7d0d6736 | 1405 | const struct bpf_prog_array *array; |
70ed0706 AS |
1406 | u32 ret; |
1407 | ||
7d0d6736 JH |
1408 | rcu_read_lock_trace(); |
1409 | array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held()); | |
1410 | ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run); | |
1411 | rcu_read_unlock_trace(); | |
70ed0706 AS |
1412 | if (!ret) |
1413 | return; | |
1414 | } | |
aca80dd9 | 1415 | #endif /* CONFIG_BPF_EVENTS */ |
04a22fae | 1416 | |
dcad1a20 | 1417 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
f3f096cf | 1418 | |
1b8f85de | 1419 | ucb = prepare_uprobe_buffer(tu, regs, ucbp); |
3eaea21b | 1420 | size = esize + ucb->dsize; |
dcad1a20 NK |
1421 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
1422 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | |
1423 | return; | |
1424 | ||
f3f096cf | 1425 | preempt_disable(); |
515619f2 ON |
1426 | head = this_cpu_ptr(call->perf_events); |
1427 | if (hlist_empty(head)) | |
1428 | goto out; | |
1429 | ||
1e1dcd93 | 1430 | entry = perf_trace_buf_alloc(size, NULL, &rctx); |
f3f096cf SD |
1431 | if (!entry) |
1432 | goto out; | |
1433 | ||
393a736c ON |
1434 | if (is_ret_probe(tu)) { |
1435 | entry->vaddr[0] = func; | |
32520b2c | 1436 | entry->vaddr[1] = instruction_pointer(regs); |
393a736c ON |
1437 | data = DATAOF_TRACE_ENTRY(entry, true); |
1438 | } else { | |
32520b2c | 1439 | entry->vaddr[0] = instruction_pointer(regs); |
393a736c ON |
1440 | data = DATAOF_TRACE_ENTRY(entry, false); |
1441 | } | |
1442 | ||
3eaea21b | 1443 | memcpy(data, ucb->buf, ucb->dsize); |
dcad1a20 | 1444 | |
3eaea21b AN |
1445 | if (size - esize > ucb->dsize) |
1446 | memset(data + ucb->dsize, 0, size - esize - ucb->dsize); | |
f3f096cf | 1447 | |
1e1dcd93 | 1448 | perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, |
8fd0fbbe | 1449 | head, NULL); |
f3f096cf SD |
1450 | out: |
1451 | preempt_enable(); | |
a51cc604 ON |
1452 | } |
1453 | ||
1454 | /* uprobe profile handler */ | |
dd9fa555 | 1455 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, |
1b8f85de | 1456 | struct uprobe_cpu_buffer **ucbp) |
a51cc604 | 1457 | { |
59da880a | 1458 | if (!uprobe_perf_filter(&tu->consumer, current->mm)) |
a51cc604 ON |
1459 | return UPROBE_HANDLER_REMOVE; |
1460 | ||
393a736c | 1461 | if (!is_ret_probe(tu)) |
1b8f85de | 1462 | __uprobe_perf_func(tu, 0, regs, ucbp); |
f42d24a1 | 1463 | return 0; |
f3f096cf | 1464 | } |
c1ae5c75 ON |
1465 | |
1466 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, | |
dd9fa555 | 1467 | struct pt_regs *regs, |
1b8f85de | 1468 | struct uprobe_cpu_buffer **ucbp) |
c1ae5c75 | 1469 | { |
1b8f85de | 1470 | __uprobe_perf_func(tu, func, regs, ucbp); |
c1ae5c75 | 1471 | } |
41bdc4b4 YS |
1472 | |
1473 | int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, | |
1474 | const char **filename, u64 *probe_offset, | |
5125e757 | 1475 | u64 *probe_addr, bool perf_type_tracepoint) |
41bdc4b4 YS |
1476 | { |
1477 | const char *pevent = trace_event_name(event->tp_event); | |
1478 | const char *group = event->tp_event->class->system; | |
1479 | struct trace_uprobe *tu; | |
1480 | ||
1481 | if (perf_type_tracepoint) | |
1482 | tu = find_probe_event(pevent, group); | |
1483 | else | |
22d5bd68 | 1484 | tu = trace_uprobe_primary_from_call(event->tp_event); |
41bdc4b4 YS |
1485 | if (!tu) |
1486 | return -EINVAL; | |
1487 | ||
1488 | *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE | |
1489 | : BPF_FD_TYPE_UPROBE; | |
1490 | *filename = tu->filename; | |
1491 | *probe_offset = tu->offset; | |
82315333 | 1492 | *probe_addr = tu->ref_ctr_offset; |
41bdc4b4 YS |
1493 | return 0; |
1494 | } | |
f3f096cf SD |
1495 | #endif /* CONFIG_PERF_EVENTS */ |
1496 | ||
70ed91c6 | 1497 | static int |
2425bcb9 | 1498 | trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, |
70ed91c6 | 1499 | void *data) |
f3f096cf | 1500 | { |
7f1d2f82 | 1501 | struct trace_event_file *file = data; |
f3f096cf SD |
1502 | |
1503 | switch (type) { | |
1504 | case TRACE_REG_REGISTER: | |
60d53e2c | 1505 | return probe_event_enable(event, file, NULL); |
f3f096cf SD |
1506 | |
1507 | case TRACE_REG_UNREGISTER: | |
60d53e2c | 1508 | probe_event_disable(event, file); |
f3f096cf SD |
1509 | return 0; |
1510 | ||
1511 | #ifdef CONFIG_PERF_EVENTS | |
1512 | case TRACE_REG_PERF_REGISTER: | |
60d53e2c | 1513 | return probe_event_enable(event, NULL, uprobe_perf_filter); |
f3f096cf SD |
1514 | |
1515 | case TRACE_REG_PERF_UNREGISTER: | |
60d53e2c | 1516 | probe_event_disable(event, NULL); |
f3f096cf | 1517 | return 0; |
736288ba ON |
1518 | |
1519 | case TRACE_REG_PERF_OPEN: | |
99c9a923 | 1520 | return uprobe_perf_open(event, data); |
736288ba ON |
1521 | |
1522 | case TRACE_REG_PERF_CLOSE: | |
99c9a923 | 1523 | return uprobe_perf_close(event, data); |
736288ba | 1524 | |
f3f096cf SD |
1525 | #endif |
1526 | default: | |
1527 | return 0; | |
1528 | } | |
f3f096cf SD |
1529 | } |
1530 | ||
da09a9e0 JO |
1531 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs, |
1532 | __u64 *data) | |
f3f096cf | 1533 | { |
f3f096cf | 1534 | struct trace_uprobe *tu; |
b7e0bf34 | 1535 | struct uprobe_dispatch_data udd; |
1b8f85de | 1536 | struct uprobe_cpu_buffer *ucb = NULL; |
f42d24a1 | 1537 | int ret = 0; |
f3f096cf | 1538 | |
a932b738 | 1539 | tu = container_of(con, struct trace_uprobe, consumer); |
10cdb82a AN |
1540 | |
1541 | this_cpu_inc(*tu->nhits); | |
f3f096cf | 1542 | |
b7e0bf34 NK |
1543 | udd.tu = tu; |
1544 | udd.bp_addr = instruction_pointer(regs); | |
1545 | ||
1546 | current->utask->vaddr = (unsigned long) &udd; | |
1547 | ||
dd9fa555 NK |
1548 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) |
1549 | return 0; | |
1550 | ||
747774d6 | 1551 | if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) |
1b8f85de | 1552 | ret |= uprobe_trace_func(tu, regs, &ucb); |
f3f096cf SD |
1553 | |
1554 | #ifdef CONFIG_PERF_EVENTS | |
747774d6 | 1555 | if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) |
1b8f85de | 1556 | ret |= uprobe_perf_func(tu, regs, &ucb); |
f3f096cf | 1557 | #endif |
dd9fa555 | 1558 | uprobe_buffer_put(ucb); |
f42d24a1 | 1559 | return ret; |
f3f096cf SD |
1560 | } |
1561 | ||
c1ae5c75 | 1562 | static int uretprobe_dispatcher(struct uprobe_consumer *con, |
da09a9e0 JO |
1563 | unsigned long func, struct pt_regs *regs, |
1564 | __u64 *data) | |
c1ae5c75 ON |
1565 | { |
1566 | struct trace_uprobe *tu; | |
b7e0bf34 | 1567 | struct uprobe_dispatch_data udd; |
1b8f85de | 1568 | struct uprobe_cpu_buffer *ucb = NULL; |
c1ae5c75 ON |
1569 | |
1570 | tu = container_of(con, struct trace_uprobe, consumer); | |
1571 | ||
b7e0bf34 NK |
1572 | udd.tu = tu; |
1573 | udd.bp_addr = func; | |
1574 | ||
1575 | current->utask->vaddr = (unsigned long) &udd; | |
1576 | ||
dd9fa555 NK |
1577 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) |
1578 | return 0; | |
1579 | ||
747774d6 | 1580 | if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE)) |
1b8f85de | 1581 | uretprobe_trace_func(tu, func, regs, &ucb); |
c1ae5c75 ON |
1582 | |
1583 | #ifdef CONFIG_PERF_EVENTS | |
747774d6 | 1584 | if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE)) |
1b8f85de | 1585 | uretprobe_perf_func(tu, func, regs, &ucb); |
c1ae5c75 | 1586 | #endif |
dd9fa555 | 1587 | uprobe_buffer_put(ucb); |
c1ae5c75 ON |
1588 | return 0; |
1589 | } | |
1590 | ||
f3f096cf SD |
1591 | static struct trace_event_functions uprobe_funcs = { |
1592 | .trace = print_uprobe_event | |
1593 | }; | |
1594 | ||
04ae87a5 PZ |
1595 | static struct trace_event_fields uprobe_fields_array[] = { |
1596 | { .type = TRACE_FUNCTION_TYPE, | |
1597 | .define_fields = uprobe_event_define_fields }, | |
1598 | {} | |
1599 | }; | |
1600 | ||
e3dc9f89 | 1601 | static inline void init_trace_event_call(struct trace_uprobe *tu) |
f3f096cf | 1602 | { |
e3dc9f89 | 1603 | struct trace_event_call *call = trace_probe_event_call(&tu->tp); |
f3f096cf | 1604 | call->event.funcs = &uprobe_funcs; |
04ae87a5 | 1605 | call->class->fields_array = uprobe_fields_array; |
f3f096cf | 1606 | |
9fd2e48b | 1607 | call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY; |
33ea4b24 | 1608 | call->class->reg = trace_uprobe_register; |
33ea4b24 SL |
1609 | } |
1610 | ||
1611 | static int register_uprobe_event(struct trace_uprobe *tu) | |
1612 | { | |
e3dc9f89 | 1613 | init_trace_event_call(tu); |
f3f096cf | 1614 | |
46e5376d | 1615 | return trace_probe_register_event_call(&tu->tp); |
f3f096cf SD |
1616 | } |
1617 | ||
c6c2401d | 1618 | static int unregister_uprobe_event(struct trace_uprobe *tu) |
f3f096cf | 1619 | { |
46e5376d | 1620 | return trace_probe_unregister_event_call(&tu->tp); |
f3f096cf SD |
1621 | } |
1622 | ||
33ea4b24 SL |
1623 | #ifdef CONFIG_PERF_EVENTS |
1624 | struct trace_event_call * | |
a6ca88b2 SL |
1625 | create_local_trace_uprobe(char *name, unsigned long offs, |
1626 | unsigned long ref_ctr_offset, bool is_return) | |
33ea4b24 | 1627 | { |
007517a0 | 1628 | enum probe_print_type ptype; |
33ea4b24 | 1629 | struct trace_uprobe *tu; |
33ea4b24 SL |
1630 | struct path path; |
1631 | int ret; | |
1632 | ||
1633 | ret = kern_path(name, LOOKUP_FOLLOW, &path); | |
1634 | if (ret) | |
1635 | return ERR_PTR(ret); | |
1636 | ||
0c92c7a3 SL |
1637 | if (!d_is_reg(path.dentry)) { |
1638 | path_put(&path); | |
33ea4b24 SL |
1639 | return ERR_PTR(-EINVAL); |
1640 | } | |
1641 | ||
1642 | /* | |
0597c49c | 1643 | * local trace_uprobes are not added to dyn_event, so they are never |
33ea4b24 SL |
1644 | * searched in find_probe_event(). Therefore, there is no concern of | |
1645 | * a duplicated name "DUMMY_EVENT" here. | |
1646 | */ | |
1647 | tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0, | |
1648 | is_return); | |
1649 | ||
1650 | if (IS_ERR(tu)) { | |
1651 | pr_info("Failed to allocate trace_uprobe.(%d)\n", | |
1652 | (int)PTR_ERR(tu)); | |
0c92c7a3 | 1653 | path_put(&path); |
33ea4b24 SL |
1654 | return ERR_CAST(tu); |
1655 | } | |
1656 | ||
1657 | tu->offset = offs; | |
0c92c7a3 | 1658 | tu->path = path; |
a6ca88b2 | 1659 | tu->ref_ctr_offset = ref_ctr_offset; |
33ea4b24 | 1660 | tu->filename = kstrdup(name, GFP_KERNEL); |
8c722424 XW |
1661 | if (!tu->filename) { |
1662 | ret = -ENOMEM; | |
1663 | goto error; | |
1664 | } | |
1665 | ||
e3dc9f89 | 1666 | init_trace_event_call(tu); |
33ea4b24 | 1667 | |
007517a0 SRV |
1668 | ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; |
1669 | if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) { | |
33ea4b24 SL |
1670 | ret = -ENOMEM; |
1671 | goto error; | |
1672 | } | |
1673 | ||
e3dc9f89 | 1674 | return trace_probe_event_call(&tu->tp); |
33ea4b24 SL |
1675 | error: |
1676 | free_trace_uprobe(tu); | |
1677 | return ERR_PTR(ret); | |
1678 | } | |
1679 | ||
1680 | void destroy_local_trace_uprobe(struct trace_event_call *event_call) | |
1681 | { | |
1682 | struct trace_uprobe *tu; | |
1683 | ||
60d53e2c | 1684 | tu = trace_uprobe_primary_from_call(event_call); |
33ea4b24 | 1685 | |
33ea4b24 SL |
1686 | free_trace_uprobe(tu); |
1687 | } | |
1688 | #endif /* CONFIG_PERF_EVENTS */ | |
1689 | ||
39bcdd6a | 1690 | /* Make a trace interface for controlling probe points */ |
f3f096cf SD |
1691 | static __init int init_uprobe_trace(void) |
1692 | { | |
0597c49c MH |
1693 | int ret; |
1694 | ||
1695 | ret = dyn_event_register(&trace_uprobe_ops); | |
1696 | if (ret) | |
1697 | return ret; | |
f3f096cf | 1698 | |
22c36b18 WY |
1699 | ret = tracing_init_dentry(); |
1700 | if (ret) | |
f3f096cf SD |
1701 | return 0; |
1702 | ||
21ccc9cd | 1703 | trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL, |
f3f096cf SD |
1704 | NULL, &uprobe_events_ops); |
1705 | /* Profile interface */ | |
21ccc9cd | 1706 | trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL, |
f3f096cf SD |
1707 | NULL, &uprobe_profile_ops); |
1708 | return 0; | |
1709 | } | |
1710 | ||
1711 | fs_initcall(init_uprobe_trace); |