Commit | Line | Data |
---|---|---|
bcea3f96 | 1 | // SPDX-License-Identifier: GPL-2.0 |
f3f096cf SD |
2 | /* |
3 | * uprobes-based tracing events | |
4 | * | |
f3f096cf SD |
5 | * Copyright (C) IBM Corporation, 2010-2012 |
6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | |
7 | */ | |
ea6eb5e7 | 8 | #define pr_fmt(fmt) "trace_uprobe: " fmt |
f3f096cf | 9 | |
aef2feda | 10 | #include <linux/bpf-cgroup.h> |
17911ff3 | 11 | #include <linux/security.h> |
0597c49c | 12 | #include <linux/ctype.h> |
f3f096cf SD |
13 | #include <linux/module.h> |
14 | #include <linux/uaccess.h> | |
15 | #include <linux/uprobes.h> | |
16 | #include <linux/namei.h> | |
b2e902f0 | 17 | #include <linux/string.h> |
b2d09103 | 18 | #include <linux/rculist.h> |
8c7dcb84 | 19 | #include <linux/filter.h> |
10cdb82a | 20 | #include <linux/percpu.h> |
f3f096cf | 21 | |
0597c49c | 22 | #include "trace_dynevent.h" |
f3f096cf | 23 | #include "trace_probe.h" |
53305928 | 24 | #include "trace_probe_tmpl.h" |
f3f096cf SD |
25 | |
26 | #define UPROBE_EVENT_SYSTEM "uprobes" | |
27 | ||
/*
 * Fixed header stored at the start of every uprobe trace event record.
 * vaddr[] holds the probed instruction address; for a return probe it
 * holds two addresses (see SIZEOF_TRACE_ENTRY below).
 */
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

/* Record header size: two addresses for a return probe, one otherwise. */
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

/* Start of the variable-size argument payload that follows the header. */
#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
39 | ||
static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

/* Hooks that plug trace_uprobe into the generic dynamic-event framework. */
static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};
54 | ||
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;		/* dynamic-event list linkage */
	struct uprobe_consumer		consumer;	/* handler/ret_handler callbacks */
	struct path			path;		/* resolved path of the probed file */
	char				*filename;	/* user-supplied file name string */
	struct uprobe			*uprobe;	/* registered uprobe handle */
	unsigned long			offset;		/* probe offset within the file */
	unsigned long			ref_ctr_offset;	/* optional reference-counter offset */
	unsigned long __percpu		*nhits;		/* per-cpu hit counter */
	struct trace_probe		tp;		/* common probe state; has flexible args[], keep last */
};

/* True iff @ev is backed by this file's trace_uprobe_ops. */
static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

/* Convert a dyn_event back to its containing trace_uprobe. */
static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 *
 * Walks all dynamic events, skipping those that are not uprobe events.
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
88 | ||
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

/* Hit callbacks wired into tu->consumer by alloc_trace_uprobe(). */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
f3f096cf | 95 | |
/*
 * Compute the address of the n-th word on the user stack, accounting for
 * the architecture's stack growth direction.
 */
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

/*
 * Fetch the n-th word from the user stack of the probed task.
 * Returns 0 if the user memory cannot be read (indistinguishable from a
 * genuine zero value on the stack).
 */
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
120 | ||
/*
 * Uprobes-specific fetch functions
 */
/* Read @size bytes from the probed task's user memory at @src into @dest. */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

/* For uprobes every address is a user address, so this is the same read. */
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}
137 | ||
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen  = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	/* FETCH_TOKEN_COMM is a sentinel meaning "fetch current->comm". */
	if (addr == FETCH_TOKEN_COMM)
		ret = strscpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';	/* truncated: force termination */
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	} else
		/* Fetch failed: record an empty data location. */
		*(u32 *)dest = make_data_loc(0, (void *)dst - base);

	return ret;
}

/* User-space string fetch; identical to the above for uprobes. */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}
180 | ||
/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	/* Over-long (or unreadable) strings are reported as length 0. */
	return (len > MAX_STRING_SIZE) ? 0 : len;
}

/* User-space variant; identical to the above for uprobes. */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}
201 | ||
/*
 * Translate a file offset into the corresponding virtual address in the
 * probed task, using the dispatch data stashed in current->utask->vaddr
 * to recover the file's load base (bp_addr - probe offset).
 */
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
b7e0bf34 | 212 | |
/* Note that we don't verify it, since the code does not come from user space */
/*
 * Execute one fetch "instruction" against the saved registers (@rec) of
 * the probed task, then hand the fetched value to the common bottom half
 * for dereference/store processing into @dest/@base.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;
	int ret;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		/* Ops shared with other probe types (immediates etc.). */
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
252 | ||
/* Initialize a (perf) event filter: no systemwide users, empty event list. */
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

/* True when no perf event (systemwide or per-task) references the filter. */
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

/* A return probe is one whose consumer has a ret_handler installed. */
static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}
269 | ||
/* dyn_event callback: an enabled probe must not be removed. */
static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}
276 | ||
/*
 * Match the command-line form of this probe ("FILE:OFFSET[(REF)] [ARGS]")
 * against @argv. An empty @argc matches anything; otherwise argv[0] must
 * reproduce the filename, offset and optional ref-counter offset exactly,
 * and any remaining args must match the probe's arguments.
 */
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	/* Rebuild the canonical offset string to compare textually. */
	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
304 | ||
/*
 * dyn_event callback: match by event name (empty name matches all),
 * optional group name, and optionally the full probe command.
 */
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return (event[0] == '\0' ||
		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	   trace_uprobe_match_command_head(tu, argc, argv);
}
315 | ||
/*
 * Resolve a trace_event_call to the primary trace_uprobe behind it
 * (several probes may share one event). Returns NULL (with a warning)
 * if the call has no probe attached.
 */
static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}
327 | ||
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 * Returns the new probe or an ERR_PTR; on error all partial allocations
 * are released.
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	/* tp.args is a flexible array sized by nargs. */
	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->nhits = alloc_percpu(unsigned long);
	if (!tu->nhits) {
		ret = -ENOMEM;
		goto error;
	}

	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	/* Only return probes get a ret_handler; see is_ret_probe(). */
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	free_percpu(tu->nhits);
	kfree(tu);

	return ERR_PTR(ret);
}
364 | ||
/* Release everything alloc_trace_uprobe()/parsing attached to @tu. */
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	free_percpu(tu->nhits);
	kfree(tu);
}
376 | ||
377 | static struct trace_uprobe *find_probe_event(const char *event, const char *group) | |
378 | { | |
0597c49c | 379 | struct dyn_event *pos; |
f3f096cf SD |
380 | struct trace_uprobe *tu; |
381 | ||
0597c49c | 382 | for_each_trace_uprobe(tu, pos) |
b55ce203 MH |
383 | if (strcmp(trace_probe_name(&tu->tp), event) == 0 && |
384 | strcmp(trace_probe_group_name(&tu->tp), group) == 0) | |
f3f096cf SD |
385 | return tu; |
386 | ||
387 | return NULL; | |
388 | } | |
389 | ||
/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	/*
	 * If other probes share this event, only unlink this one;
	 * the event itself stays registered for the siblings.
	 */
	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
		return -EBUSY;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}
412 | ||
/*
 * Check whether @comp duplicates a probe already attached to @orig's
 * event: same inode, same offset, and identical argument expressions.
 */
static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
441 | ||
/*
 * Append @tu to the existing event @to (same group/event name).
 * Fails with -EEXIST if the argument types differ or the exact same
 * probe location is already attached.
 */
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

	return ret;
}
466 | ||
/*
 * Uprobe with multiple reference counter is not allowed. i.e.
 * If inode and offset matches, reference counter offset *must*
 * match as well. Though, there is one exception: If user is
 * replacing old trace_uprobe with new one(same group/event),
 * then we allow same uprobe with new reference counter as far
 * as the new one does not conflict with any other existing
 * ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}
492 | ||
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	/* event_mutex protects the dynamic-event list and event registry. */
	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		/* A probe and a return probe cannot share one event. */
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
	mutex_unlock(&event_mutex);

	return ret;
}
536 | ||
537 | /* | |
538 | * Argument syntax: | |
95c104c3 | 539 | * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS] |
f3f096cf | 540 | */ |
d262271d | 541 | static int __trace_uprobe_create(int argc, const char **argv) |
f3f096cf SD |
542 | { |
543 | struct trace_uprobe *tu; | |
0597c49c MH |
544 | const char *event = NULL, *group = UPROBE_EVENT_SYSTEM; |
545 | char *arg, *filename, *rctr, *rctr_end, *tmp; | |
f3f096cf | 546 | char buf[MAX_EVENT_NAME_LEN]; |
95c104c3 | 547 | char gbuf[MAX_EVENT_NAME_LEN]; |
007517a0 | 548 | enum probe_print_type ptype; |
f3f096cf | 549 | struct path path; |
1cc33161 | 550 | unsigned long offset, ref_ctr_offset; |
0597c49c | 551 | bool is_return = false; |
f3f096cf SD |
552 | int i, ret; |
553 | ||
1cc33161 | 554 | ref_ctr_offset = 0; |
f3f096cf | 555 | |
f01098c7 ET |
556 | switch (argv[0][0]) { |
557 | case 'r': | |
4ee5a52e | 558 | is_return = true; |
f01098c7 ET |
559 | break; |
560 | case 'p': | |
561 | break; | |
562 | default: | |
563 | return -ECANCELED; | |
564 | } | |
565 | ||
566 | if (argc < 2) | |
0597c49c | 567 | return -ECANCELED; |
f3f096cf | 568 | |
0597c49c | 569 | if (argv[0][1] == ':') |
f3f096cf | 570 | event = &argv[0][2]; |
f3f096cf | 571 | |
0597c49c MH |
572 | if (!strchr(argv[1], '/')) |
573 | return -ECANCELED; | |
f3f096cf | 574 | |
0597c49c MH |
575 | filename = kstrdup(argv[1], GFP_KERNEL); |
576 | if (!filename) | |
577 | return -ENOMEM; | |
f3f096cf | 578 | |
6496bb72 | 579 | /* Find the last occurrence, in case the path contains ':' too. */ |
0597c49c MH |
580 | arg = strrchr(filename, ':'); |
581 | if (!arg || !isdigit(arg[1])) { | |
582 | kfree(filename); | |
583 | return -ECANCELED; | |
584 | } | |
f3f096cf | 585 | |
ab105a4f MH |
586 | trace_probe_log_init("trace_uprobe", argc, argv); |
587 | trace_probe_log_set_index(1); /* filename is the 2nd argument */ | |
588 | ||
f3f096cf | 589 | *arg++ = '\0'; |
f3f096cf | 590 | ret = kern_path(filename, LOOKUP_FOLLOW, &path); |
0597c49c | 591 | if (ret) { |
ab105a4f | 592 | trace_probe_log_err(0, FILE_NOT_FOUND); |
0597c49c | 593 | kfree(filename); |
ab105a4f | 594 | trace_probe_log_clear(); |
0c92c7a3 | 595 | return ret; |
0597c49c | 596 | } |
0c92c7a3 | 597 | if (!d_is_reg(path.dentry)) { |
ab105a4f | 598 | trace_probe_log_err(0, NO_REGULAR_FILE); |
d24d7dbf JZ |
599 | ret = -EINVAL; |
600 | goto fail_address_parse; | |
601 | } | |
f3f096cf | 602 | |
1cc33161 RB |
603 | /* Parse reference counter offset if specified. */ |
604 | rctr = strchr(arg, '('); | |
605 | if (rctr) { | |
606 | rctr_end = strchr(rctr, ')'); | |
ab105a4f MH |
607 | if (!rctr_end) { |
608 | ret = -EINVAL; | |
609 | rctr_end = rctr + strlen(rctr); | |
610 | trace_probe_log_err(rctr_end - filename, | |
611 | REFCNT_OPEN_BRACE); | |
612 | goto fail_address_parse; | |
613 | } else if (rctr_end[1] != '\0') { | |
1cc33161 | 614 | ret = -EINVAL; |
ab105a4f MH |
615 | trace_probe_log_err(rctr_end + 1 - filename, |
616 | BAD_REFCNT_SUFFIX); | |
1cc33161 RB |
617 | goto fail_address_parse; |
618 | } | |
619 | ||
620 | *rctr++ = '\0'; | |
621 | *rctr_end = '\0'; | |
622 | ret = kstrtoul(rctr, 0, &ref_ctr_offset); | |
623 | if (ret) { | |
ab105a4f | 624 | trace_probe_log_err(rctr - filename, BAD_REFCNT); |
1cc33161 RB |
625 | goto fail_address_parse; |
626 | } | |
627 | } | |
628 | ||
3dd3aae3 MH |
629 | /* Check if there is %return suffix */ |
630 | tmp = strchr(arg, '%'); | |
631 | if (tmp) { | |
632 | if (!strcmp(tmp, "%return")) { | |
633 | *tmp = '\0'; | |
634 | is_return = true; | |
635 | } else { | |
636 | trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX); | |
637 | ret = -EINVAL; | |
638 | goto fail_address_parse; | |
639 | } | |
640 | } | |
641 | ||
1cc33161 | 642 | /* Parse uprobe offset. */ |
84d7ed79 | 643 | ret = kstrtoul(arg, 0, &offset); |
ab105a4f MH |
644 | if (ret) { |
645 | trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS); | |
84d7ed79 | 646 | goto fail_address_parse; |
ab105a4f | 647 | } |
f3f096cf SD |
648 | |
649 | /* setup a probe */ | |
ab105a4f | 650 | trace_probe_log_set_index(0); |
0597c49c | 651 | if (event) { |
95c104c3 | 652 | ret = traceprobe_parse_event_name(&event, &group, gbuf, |
ab105a4f | 653 | event - argv[0]); |
0597c49c MH |
654 | if (ret) |
655 | goto fail_address_parse; | |
95c104c3 LY |
656 | } |
657 | ||
658 | if (!event) { | |
b2e902f0 | 659 | char *tail; |
f3f096cf SD |
660 | char *ptr; |
661 | ||
b2e902f0 AS |
662 | tail = kstrdup(kbasename(filename), GFP_KERNEL); |
663 | if (!tail) { | |
f3f096cf SD |
664 | ret = -ENOMEM; |
665 | goto fail_address_parse; | |
666 | } | |
667 | ||
f3f096cf SD |
668 | ptr = strpbrk(tail, ".-_"); |
669 | if (ptr) | |
670 | *ptr = '\0'; | |
671 | ||
672 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset); | |
673 | event = buf; | |
674 | kfree(tail); | |
675 | } | |
676 | ||
ab105a4f MH |
677 | argc -= 2; |
678 | argv += 2; | |
679 | ||
4ee5a52e | 680 | tu = alloc_trace_uprobe(group, event, argc, is_return); |
f3f096cf | 681 | if (IS_ERR(tu)) { |
f3f096cf | 682 | ret = PTR_ERR(tu); |
a039480e MH |
683 | /* This must return -ENOMEM otherwise there is a bug */ |
684 | WARN_ON_ONCE(ret != -ENOMEM); | |
f3f096cf SD |
685 | goto fail_address_parse; |
686 | } | |
687 | tu->offset = offset; | |
1cc33161 | 688 | tu->ref_ctr_offset = ref_ctr_offset; |
0c92c7a3 | 689 | tu->path = path; |
0597c49c | 690 | tu->filename = filename; |
f3f096cf SD |
691 | |
692 | /* parse arguments */ | |
f3f096cf | 693 | for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { |
1b8b0cd7 MHG |
694 | struct traceprobe_parse_context ctx = { |
695 | .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER, | |
696 | }; | |
697 | ||
ab105a4f | 698 | trace_probe_log_set_index(i + 2); |
1b8b0cd7 | 699 | ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx); |
b1d1e904 | 700 | traceprobe_finish_parse(&ctx); |
d00bbea9 | 701 | if (ret) |
f3f096cf | 702 | goto error; |
f3f096cf SD |
703 | } |
704 | ||
007517a0 SRV |
705 | ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL; |
706 | ret = traceprobe_set_print_fmt(&tu->tp, ptype); | |
b4d4b96b MH |
707 | if (ret < 0) |
708 | goto error; | |
709 | ||
f3f096cf | 710 | ret = register_trace_uprobe(tu); |
ab105a4f MH |
711 | if (!ret) |
712 | goto out; | |
f3f096cf SD |
713 | |
714 | error: | |
715 | free_trace_uprobe(tu); | |
ab105a4f MH |
716 | out: |
717 | trace_probe_log_clear(); | |
f3f096cf SD |
718 | return ret; |
719 | ||
720 | fail_address_parse: | |
ab105a4f | 721 | trace_probe_log_clear(); |
0c92c7a3 | 722 | path_put(&path); |
0597c49c | 723 | kfree(filename); |
f3f096cf | 724 | |
f3f096cf SD |
725 | return ret; |
726 | } | |
727 | ||
d262271d MH |
/* dyn_event .create callback: tokenize the command and run the parser. */
int trace_uprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_uprobe_create);
}
732 | ||
733 | static int create_or_delete_trace_uprobe(const char *raw_command) | |
f3f096cf | 734 | { |
0597c49c | 735 | int ret; |
f3f096cf | 736 | |
d262271d MH |
737 | if (raw_command[0] == '-') |
738 | return dyn_event_release(raw_command, &trace_uprobe_ops); | |
f3f096cf | 739 | |
d262271d | 740 | ret = trace_uprobe_create(raw_command); |
0597c49c | 741 | return ret == -ECANCELED ? -EINVAL : ret; |
f3f096cf SD |
742 | } |
743 | ||
/* dyn_event .free callback: tear down and free one probe. */
static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
750 | ||
/* Probes listing interfaces */
/* Print one probe in its command form: "p|r:GRP/EVENT FILE:0xOFF[(0xREF)] ARGS". */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
771 | ||
0597c49c MH |
772 | static int probes_seq_show(struct seq_file *m, void *v) |
773 | { | |
774 | struct dyn_event *ev = v; | |
775 | ||
776 | if (!is_trace_uprobe(ev)) | |
777 | return 0; | |
778 | ||
779 | return trace_uprobe_show(m, ev); | |
780 | } | |
781 | ||
f3f096cf | 782 | static const struct seq_operations probes_seq_op = { |
0597c49c MH |
783 | .start = dyn_event_seq_start, |
784 | .next = dyn_event_seq_next, | |
785 | .stop = dyn_event_seq_stop, | |
786 | .show = probes_seq_show | |
f3f096cf SD |
787 | }; |
788 | ||
/*
 * Open handler for the uprobe_events file. Opening with O_TRUNC for
 * write removes all existing (unused) uprobe events first.
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Write handler: each line is one create/delete command. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
821 | ||
/* Probes profiling interfaces */
/* Print one line per uprobe event: filename, event name, total hit count. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;
	unsigned long nhits;
	int cpu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);

	/* Sum the per-cpu hit counters (updated lock-free by the handlers). */
	nhits = 0;
	for_each_possible_cpu(cpu) {
		nhits += per_cpu(*tu->nhits, cpu);
	}

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
		   trace_probe_name(&tu->tp), nhits);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

/* Open handler for the uprobe_profile file (read-only statistics). */
static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
870 | ||
/*
 * Per-cpu scratch buffer used to assemble event payloads. The mutex
 * serializes users that may have migrated off the buffer's CPU (see
 * uprobe_buffer_get()).
 */
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;		/* one page, allocated in uprobe_buffer_init() */
	int dsize;		/* size of the data currently stored in buf */
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
/* Number of enabled events sharing the buffers; protected by event_mutex. */
static int uprobe_buffer_refcnt;
878 | ||
/*
 * Allocate the per-cpu buffer array and one page per possible CPU.
 * On failure, every page allocated so far is freed again.
 */
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	/* Unwind: free pages of CPUs visited before the failing one. */
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
910 | ||
911 | static int uprobe_buffer_enable(void) | |
912 | { | |
913 | int ret = 0; | |
914 | ||
915 | BUG_ON(!mutex_is_locked(&event_mutex)); | |
916 | ||
917 | if (uprobe_buffer_refcnt++ == 0) { | |
918 | ret = uprobe_buffer_init(); | |
919 | if (ret < 0) | |
920 | uprobe_buffer_refcnt--; | |
921 | } | |
922 | ||
923 | return ret; | |
924 | } | |
925 | ||
/*
 * Drop a reference on the shared per-cpu buffers; the last user frees
 * every per-cpu page and the array itself.  Caller must hold event_mutex.
 */
static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;	/* signals "not available" to dispatchers */
	}
}
941 | ||
942 | static struct uprobe_cpu_buffer *uprobe_buffer_get(void) | |
943 | { | |
944 | struct uprobe_cpu_buffer *ucb; | |
945 | int cpu; | |
946 | ||
947 | cpu = raw_smp_processor_id(); | |
948 | ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); | |
949 | ||
950 | /* | |
951 | * Use per-cpu buffers for fastest access, but we might migrate | |
952 | * so the mutex makes sure we have sole access to it. | |
953 | */ | |
954 | mutex_lock(&ucb->mutex); | |
955 | ||
956 | return ucb; | |
957 | } | |
958 | ||
959 | static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) | |
960 | { | |
1b8f85de AN |
961 | if (!ucb) |
962 | return; | |
dcad1a20 NK |
963 | mutex_unlock(&ucb->mutex); |
964 | } | |
965 | ||
/*
 * Lazily fetch a per-cpu buffer and stage the probe's argument data into
 * it.  *ucbp caches the buffer across the trace and perf handlers of one
 * dispatch so the (potentially expensive) argument fetch runs only once.
 */
static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
						       struct pt_regs *regs,
						       struct uprobe_cpu_buffer **ucbp)
{
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	/* already prepared earlier in this dispatch */
	if (*ucbp)
		return *ucbp;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	dsize = __get_data_size(&tu->tp, regs, NULL);

	ucb = uprobe_buffer_get();
	ucb->dsize = tu->tp.size + dsize;

	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);

	*ucbp = ucb;
	return ucb;
}
987 | ||
/*
 * Emit one trace-buffer event for a single trace_event_file.  The entry
 * carries the probe address (and, for return probes, both function and
 * return addresses) followed by the pre-staged argument data from ucb.
 */
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	/* staged data must fit in the one-page per-cpu buffer */
	if (WARN_ON_ONCE(ucb->dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + ucb->dsize;
	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	if (is_ret_probe(tu)) {
		/* return probe: record both entry address and return site */
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	trace_event_buffer_commit(&fbuffer);
}
f42d24a1 | 1026 | |
/* uprobe handler: fan the event out to every file linked to this probe */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	/* return probes are handled by uretprobe_trace_func() instead */
	if (is_ret_probe(tu))
		return 0;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
	rcu_read_unlock();

	return 0;
}
1046 | ||
/* uretprobe handler: like uprobe_trace_func() but also records func */
static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer **ucbp)
{
	struct event_file_link *link;
	struct uprobe_cpu_buffer *ucb;

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, link->file);
	rcu_read_unlock();
}
1061 | ||
f3f096cf SD |
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		/* "name: (retsite <- func)" for return probes */
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		/* "name: (addr)" for entry probes */
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
1097 | ||
59da880a | 1098 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm); |
31ba3348 | 1099 | |
60d53e2c | 1100 | static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter) |
f3f096cf | 1101 | { |
3c83a9ad ON |
1102 | struct inode *inode = d_real_inode(tu->path.dentry); |
1103 | struct uprobe *uprobe; | |
70ed91c6 | 1104 | |
60d53e2c | 1105 | tu->consumer.filter = filter; |
3c83a9ad ON |
1106 | uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer); |
1107 | if (IS_ERR(uprobe)) | |
1108 | return PTR_ERR(uprobe); | |
60d53e2c | 1109 | |
3c83a9ad ON |
1110 | tu->uprobe = uprobe; |
1111 | return 0; | |
60d53e2c MH |
1112 | } |
1113 | ||
/*
 * Unregister every sibling trace_uprobe attached to this trace_probe.
 * Uses the nosync variant in the loop and a single sync afterwards so
 * only one RCU-style wait is paid for the whole list.
 */
static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_uprobe *tu;
	bool sync = false;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		if (!tu->uprobe)
			continue;

		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
		sync = true;
		tu->uprobe = NULL;
	}
	if (sync)
		uprobe_unregister_sync();
}
1133 | ||
/*
 * Enable a uprobe event either for ftrace (file != NULL) or for perf
 * (file == NULL, with a perf filter).  The two modes are mutually
 * exclusive (-EINTR when the other mode already owns the event).
 * Registers all sibling probes; on failure everything is rolled back.
 */
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	/* already live: flag/file bookkeeping above was all that was needed */
	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			/* unwind the siblings registered so far */
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}
1193 | ||
60d53e2c MH |
/*
 * Disable the event for one ftrace file (file != NULL) or for perf
 * (file == NULL).  The probes are torn down only once no user —
 * neither trace file nor perf — remains.
 */
static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		/* other files still attached: keep the probes armed */
		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
1218 | ||
/*
 * Describe the event's fixed fields (probe address, plus return site for
 * uretprobes) and then its user-defined argument fields.
 */
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;	/* ret is used by the DEFINE_FIELD() macro */
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	/* argument fields start right after the fixed header */
	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
1240 | ||
#ifdef CONFIG_PERF_EVENTS
/*
 * Core filter check: true if any perf event on this filter targets the
 * given mm.  Caller must hold filter->rwlock (read or write).
 */
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}
1254 | ||
/* true if some other perf event on the filter covers this event's target mm */
static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}
1261 | ||
99c9a923 MH |
/*
 * Drop a perf event from the filter.  Returns true when the probe must
 * stay armed for the event's mm despite the removal (another consumer
 * still covers it), false when uprobe_apply() should disarm it.
 */
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/* per-task event */
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		/* system-wide event */
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}
1281 | ||
99c9a923 MH |
/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		/* system-wide event: no per-mm filtering needed */
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
1310 | ||
99c9a923 MH |
/*
 * Detach a perf event from the probe; if nothing else covers its mm,
 * ask the uprobe layer to remove the breakpoints via uprobe_apply().
 */
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	/* filter still covers the mm: breakpoints stay installed */
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}
99c9a923 MH |
1334 | |
/*
 * Attach a perf event to the probe; if the filter did not already cover
 * its mm, install the breakpoints via uprobe_apply() for all siblings.
 * A mid-list failure unwinds through uprobe_perf_close().
 */
static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	/* already covered: nothing to install */
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
		err = uprobe_apply(tu->uprobe, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}
1360 | ||
/*
 * uprobe_consumer->filter callback for perf: decide whether this mm
 * should keep its breakpoint.
 */
static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	/*
	 * speculative short-circuiting check to avoid unnecessarily taking
	 * filter->rwlock below, if the uprobe has system-wide consumer
	 */
	if (READ_ONCE(filter->nr_systemwide))
		return true;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}
1383 | ||
/*
 * Emit one perf sample for this probe hit.  Runs any attached BPF
 * programs first (a zero return suppresses the sample), then copies the
 * staged argument data into a perf trace buffer.
 */
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer **ucbp)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct uprobe_cpu_buffer *ucb;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

#ifdef CONFIG_BPF_EVENTS
	if (bpf_prog_array_valid(call)) {
		u32 ret;

		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
		if (!ret)
			return;	/* BPF program filtered this hit out */
	}
#endif /* CONFIG_BPF_EVENTS */

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
	size = esize + ucb->dsize;
	/* round the payload up to u64 alignment, as perf requires */
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;	/* no perf event on this CPU cares */

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, ucb->dsize);

	/* zero the alignment padding so no stale stack data leaks to perf */
	if (size - esize > ucb->dsize)
		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
1442 | ||
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer **ucbp)
{
	/* mm no longer matches the filter: ask the core to remove the bp */
	if (!uprobe_perf_filter(&tu->consumer, current->mm))
		return UPROBE_HANDLER_REMOVE;

	/* return probes sample from uretprobe_perf_func() instead */
	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucbp);
	return 0;
}
c1ae5c75 ON |
1454 | |
/* uretprobe profile handler: sample with the function entry address */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer **ucbp)
{
	__uprobe_perf_func(tu, func, regs, ucbp);
}
41bdc4b4 YS |
1461 | |
1462 | int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, | |
1463 | const char **filename, u64 *probe_offset, | |
5125e757 | 1464 | u64 *probe_addr, bool perf_type_tracepoint) |
41bdc4b4 YS |
1465 | { |
1466 | const char *pevent = trace_event_name(event->tp_event); | |
1467 | const char *group = event->tp_event->class->system; | |
1468 | struct trace_uprobe *tu; | |
1469 | ||
1470 | if (perf_type_tracepoint) | |
1471 | tu = find_probe_event(pevent, group); | |
1472 | else | |
22d5bd68 | 1473 | tu = trace_uprobe_primary_from_call(event->tp_event); |
41bdc4b4 YS |
1474 | if (!tu) |
1475 | return -EINVAL; | |
1476 | ||
1477 | *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE | |
1478 | : BPF_FD_TYPE_UPROBE; | |
1479 | *filename = tu->filename; | |
1480 | *probe_offset = tu->offset; | |
5125e757 | 1481 | *probe_addr = 0; |
41bdc4b4 YS |
1482 | return 0; |
1483 | } | |
f3f096cf SD |
1484 | #endif /* CONFIG_PERF_EVENTS */ |
1485 | ||
/*
 * Central ->reg callback: dispatch enable/disable/open/close requests
 * from ftrace and perf.  "data" is the trace_event_file for the ftrace
 * cases and the perf_event for the perf open/close cases.
 */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
1519 | ||
/*
 * Entry-probe dispatcher: count the hit, publish dispatch data for
 * argument fetching, then run the ftrace and/or perf handlers that are
 * currently enabled.  The ucb pointer lets both handlers share one
 * argument-staging pass.
 */
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);

	this_cpu_inc(*tu->nhits);

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	/* fetch code reads this via current->utask while decoding args */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
1549 | ||
c1ae5c75 ON |
/*
 * Return-probe dispatcher: same shape as uprobe_dispatcher() but keyed
 * on the probed function's entry address (func) rather than the current
 * instruction pointer, and it never requests removal (returns 0).
 */
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb = NULL;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	/* fetch code reads this via current->utask while decoding args */
	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, &ucb);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
1577 | ||
f3f096cf SD |
/* output formatting callbacks for uprobe events */
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
1581 | ||
04ae87a5 PZ |
/* field layout is computed dynamically per probe by uprobe_event_define_fields() */
static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};
1587 | ||
/* wire the probe's trace_event_call to the uprobe print/field/reg callbacks */
static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}
1597 | ||
/* initialize callbacks and register the event with the trace subsystem */
static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}
1604 | ||
/* remove the event from the trace subsystem; may fail if still in use */
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
1609 | ||
33ea4b24 SL |
#ifdef CONFIG_PERF_EVENTS
/*
 * Create a perf-local (not tracefs-visible) uprobe event on the binary
 * at "name" + offs.  Used by the perf_uprobe PMU.  Returns the event
 * call on success or an ERR_PTR; ownership of the path reference and
 * the allocated trace_uprobe passes to the returned event.
 */
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	enum probe_print_type ptype;
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	/* only regular files can carry uprobes */
	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * found by the dynamic-event name lookup. Therefore, there is no
	 * concern of duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	if (!tu->filename) {
		ret = -ENOMEM;
		goto error;
	}

	init_trace_event_call(tu);

	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	/* frees the filename and drops the path reference as well */
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
1666 | ||
/* tear down an event created by create_local_trace_uprobe() */
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
1676 | ||
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	/* tracefs unavailable: dyn_event interface still works, so not fatal */
	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);