1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/ctype.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/uprobes.h>
16 #include <linux/namei.h>
17 #include <linux/string.h>
18 #include <linux/rculist.h>
19 #include <linux/filter.h>
21 #include "trace_dynevent.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
25 #define UPROBE_EVENT_SYSTEM "uprobes"
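/*
 * Probes created through this interface are grouped under "uprobes" by
 * default; for example, a probe named p_bash_0x4245c0 would typically show
 * up in tracefs as events/uprobes/p_bash_0x4245c0/ and is enabled like any
 * other trace event (illustrative name, not taken from this file).
 */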
27 struct uprobe_trace_entry_head {
28 struct trace_entry ent;
29 unsigned long vaddr[];
30 };
32 #define SIZEOF_TRACE_ENTRY(is_return) \
33 (sizeof(struct uprobe_trace_entry_head) + \
34 sizeof(unsigned long) * (is_return ? 2 : 1))
36 #define DATAOF_TRACE_ENTRY(entry, is_return) \
37 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
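/*
 * Record layout implied by the two macros above (sketch): an entry probe
 * stores [trace_entry][vaddr[0] = probed ip][fetched args...], while a
 * return probe stores [trace_entry][vaddr[0] = function entry]
 * [vaddr[1] = return address][fetched args...]; DATAOF_TRACE_ENTRY() simply
 * points just past the vaddr slots.
 */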
39 static int trace_uprobe_create(const char *raw_command);
40 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
41 static int trace_uprobe_release(struct dyn_event *ev);
42 static bool trace_uprobe_is_busy(struct dyn_event *ev);
43 static bool trace_uprobe_match(const char *system, const char *event,
44 int argc, const char **argv, struct dyn_event *ev);
46 static struct dyn_event_operations trace_uprobe_ops = {
47 .create = trace_uprobe_create,
48 .show = trace_uprobe_show,
49 .is_busy = trace_uprobe_is_busy,
50 .free = trace_uprobe_release,
51 .match = trace_uprobe_match,
52 };
54 /*
55 * uprobe event core functions
56 */
57 struct trace_uprobe {
58 struct dyn_event devent;
59 struct uprobe_consumer consumer;
60 struct path path;
61 struct inode *inode;
62 char *filename;
63 unsigned long offset;
64 unsigned long ref_ctr_offset;
65 unsigned long nhit;
66 struct trace_probe tp;
67 };
69 static bool is_trace_uprobe(struct dyn_event *ev)
71 return ev->ops == &trace_uprobe_ops;
74 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
76 return container_of(ev, struct trace_uprobe, devent);
80 * for_each_trace_uprobe - iterate over the trace_uprobe list
81 * @pos: the struct trace_uprobe * for each entry
82 * @dpos: the struct dyn_event * to use as a loop cursor
84 #define for_each_trace_uprobe(pos, dpos) \
85 for_each_dyn_event(dpos) \
86 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
88 static int register_uprobe_event(struct trace_uprobe *tu);
89 static int unregister_uprobe_event(struct trace_uprobe *tu);
91 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
92 static int uretprobe_dispatcher(struct uprobe_consumer *con,
93 unsigned long func, struct pt_regs *regs);
95 #ifdef CONFIG_STACK_GROWSUP
96 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
98 return addr - (n * sizeof(long));
101 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
103 return addr + (n * sizeof(long));
107 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
108 {
109 unsigned long ret;
110 unsigned long addr = user_stack_pointer(regs);
112 addr = adjust_stack_addr(addr, n);
114 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
115 return 0;
117 return ret;
118 }
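/*
 * Worked example (illustrative): a "$stack3" fetch argument reaches this
 * helper with n == 3, so on a 64-bit target with a downward-growing stack
 * the value is read from user_stack_pointer(regs) + 3 * sizeof(long); on a
 * CONFIG_STACK_GROWSUP architecture the offset is subtracted instead.
 */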
121 * Uprobes-specific fetch functions
123 static nokprobe_inline int
124 probe_mem_read(void *dest, void *src, size_t size)
126 void __user *vaddr = (void __force __user *)src;
128 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
131 static nokprobe_inline int
132 probe_mem_read_user(void *dest, void *src, size_t size)
134 return probe_mem_read(dest, src, size);
138 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
139 * length and relative data location.
141 static nokprobe_inline int
142 fetch_store_string(unsigned long addr, void *dest, void *base)
145 u32 loc = *(u32 *)dest;
146 int maxlen = get_loc_len(loc);
147 u8 *dst = get_loc_data(dest, base);
148 void __user *src = (void __force __user *) addr;
150 if (unlikely(!maxlen))
153 if (addr == FETCH_TOKEN_COMM)
154 ret = strscpy(dst, current->comm, maxlen);
156 ret = strncpy_from_user(dst, src, maxlen);
157 if (ret >= 0) {
158 if (ret == maxlen)
159 dst[ret - 1] = '\0';
160 else
161 /*
162 * Include the terminating null byte. In this case it
163 * was copied by strncpy_from_user but not accounted
164 * for in ret.
165 */
166 ret++;
167 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
168 } else
169 *(u32 *)dest = make_data_loc(0, (void *)dst - base);
171 return ret;
172 }
174 static nokprobe_inline int
175 fetch_store_string_user(unsigned long addr, void *dest, void *base)
177 return fetch_store_string(addr, dest, base);
178 }
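/*
 * Sketch of the data_loc encoding used by fetch_store_string() above
 * (per the make_data_loc()/get_loc_len() helpers it relies on): the u32 at
 * *dest packs the string length in the upper 16 bits and the offset of the
 * string data relative to 'base' in the lower 16 bits, so a 5-byte "bash\0"
 * stored 24 bytes into the record would roughly encode as (5 << 16) | 24.
 */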
180 /* Return the length of the string, including the terminating null byte */
181 static nokprobe_inline int
182 fetch_store_strlen(unsigned long addr)
185 void __user *vaddr = (void __force __user *) addr;
187 if (addr == FETCH_TOKEN_COMM)
188 len = strlen(current->comm) + 1;
190 len = strnlen_user(vaddr, MAX_STRING_SIZE);
192 return (len > MAX_STRING_SIZE) ? 0 : len;
195 static nokprobe_inline int
196 fetch_store_strlen_user(unsigned long addr)
198 return fetch_store_strlen(addr);
201 static unsigned long translate_user_vaddr(unsigned long file_offset)
203 unsigned long base_addr;
204 struct uprobe_dispatch_data *udd;
206 udd = (void *) current->utask->vaddr;
208 base_addr = udd->bp_addr - udd->tu->offset;
209 return base_addr + file_offset;
210 }
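/*
 * Illustrative example: if the probe sits at file offset 0x1234 (tu->offset)
 * and the breakpoint fired at virtual address 0xba9041234, the computed base
 * is 0xba9040000, so an "@+0x5678" argument (a file offset) would be fetched
 * from 0xba9045678 in the traced process.
 */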
212 /* Note that we don't verify it, since the code does not come from user space */
214 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
215 void *dest, void *base)
217 struct pt_regs *regs = rec;
221 /* 1st stage: get value from context */
224 val = regs_get_register(regs, code->param);
227 val = get_user_stack_nth(regs, code->param);
229 case FETCH_OP_STACKP:
230 val = user_stack_pointer(regs);
232 case FETCH_OP_RETVAL:
233 val = regs_return_value(regs);
236 val = FETCH_TOKEN_COMM;
239 val = translate_user_vaddr(code->immediate);
242 ret = process_common_fetch_insn(code, &val);
248 return process_fetch_insn_bottom(code, val, dest, base);
250 NOKPROBE_SYMBOL(process_fetch_insn)
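/*
 * Rough sketch of the two stages (illustrative): for an argument such as
 * "+0(%si):string", the switch above only yields the raw %si value
 * (FETCH_OP_REG); dereferencing that user address and storing the string are
 * handled by process_fetch_insn_bottom() on the value returned here.
 */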
252 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
254 rwlock_init(&filter->rwlock);
255 filter->nr_systemwide = 0;
256 INIT_LIST_HEAD(&filter->perf_events);
259 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
261 return !filter->nr_systemwide && list_empty(&filter->perf_events);
264 static inline bool is_ret_probe(struct trace_uprobe *tu)
266 return tu->consumer.ret_handler != NULL;
269 static bool trace_uprobe_is_busy(struct dyn_event *ev)
271 struct trace_uprobe *tu = to_trace_uprobe(ev);
273 return trace_probe_is_enabled(&tu->tp);
276 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
277 int argc, const char **argv)
279 char buf[MAX_ARGSTR_LEN + 1];
285 len = strlen(tu->filename);
286 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
289 if (tu->ref_ctr_offset == 0)
290 snprintf(buf, sizeof(buf), "0x%0*lx",
291 (int)(sizeof(void *) * 2), tu->offset);
292 else
293 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
294 (int)(sizeof(void *) * 2), tu->offset,
295 tu->ref_ctr_offset);
296 if (strcmp(buf, &argv[0][len + 1]))
297 return false;
299 argc--; argv++;
301 return trace_probe_match_command_args(&tu->tp, argc, argv);
302 }
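/*
 * Illustrative example: deleting by name alone ("-:uprobes/p_bash_0x4245c0")
 * does not reach the comparison above, but if the command also repeats the
 * probe location, the offset has to match the zero-padded form built here,
 * e.g. "/bin/bash:0x00000000004245c0" on a 64-bit kernel rather than a bare
 * "0x4245c0".
 */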
304 static bool trace_uprobe_match(const char *system, const char *event,
305 int argc, const char **argv, struct dyn_event *ev)
307 struct trace_uprobe *tu = to_trace_uprobe(ev);
309 return (event[0] == '\0' ||
310 strcmp(trace_probe_name(&tu->tp), event) == 0) &&
311 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
312 trace_uprobe_match_command_head(tu, argc, argv);
315 static nokprobe_inline struct trace_uprobe *
316 trace_uprobe_primary_from_call(struct trace_event_call *call)
318 struct trace_probe *tp;
320 tp = trace_probe_primary_from_call(call);
321 if (WARN_ON_ONCE(!tp))
324 return container_of(tp, struct trace_uprobe, tp);
328 * Allocate new trace_uprobe and initialize it (including uprobes).
330 static struct trace_uprobe *
331 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
333 struct trace_uprobe *tu;
336 tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
338 return ERR_PTR(-ENOMEM);
340 ret = trace_probe_init(&tu->tp, event, group, true, nargs);
344 dyn_event_init(&tu->devent, &trace_uprobe_ops);
345 tu->consumer.handler = uprobe_dispatcher;
347 tu->consumer.ret_handler = uretprobe_dispatcher;
348 init_trace_uprobe_filter(tu->tp.event->filter);
357 static void free_trace_uprobe(struct trace_uprobe *tu)
363 trace_probe_cleanup(&tu->tp);
368 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
370 struct dyn_event *pos;
371 struct trace_uprobe *tu;
373 for_each_trace_uprobe(tu, pos)
374 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
375 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
381 /* Unregister a trace_uprobe and probe_event */
382 static int unregister_trace_uprobe(struct trace_uprobe *tu)
386 if (trace_probe_has_sibling(&tu->tp))
389 /* If there's a reference to the dynamic event */
390 if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
393 ret = unregister_uprobe_event(tu);
398 dyn_event_remove(&tu->devent);
399 trace_probe_unlink(&tu->tp);
400 free_trace_uprobe(tu);
404 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
405 struct trace_uprobe *comp)
407 struct trace_probe_event *tpe = orig->tp.event;
408 struct inode *comp_inode = d_real_inode(comp->path.dentry);
411 list_for_each_entry(orig, &tpe->probes, tp.list) {
412 if (comp_inode != d_real_inode(orig->path.dentry) ||
413 comp->offset != orig->offset)
417 * trace_probe_compare_arg_type() ensured that nr_args and
418 * each argument name and type are same. Let's compare comm.
420 for (i = 0; i < orig->tp.nr_args; i++) {
421 if (strcmp(orig->tp.args[i].comm,
422 comp->tp.args[i].comm))
426 if (i == orig->tp.nr_args)
433 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
437 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
439 /* Note that argument starts index = 2 */
440 trace_probe_log_set_index(ret + 1);
441 trace_probe_log_err(0, DIFF_ARG_TYPE);
444 if (trace_uprobe_has_same_uprobe(to, tu)) {
445 trace_probe_log_set_index(0);
446 trace_probe_log_err(0, SAME_PROBE);
450 /* Append to existing event */
451 ret = trace_probe_append(&tu->tp, &to->tp);
453 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
459 * Uprobe with multiple reference counter is not allowed. i.e.
460 * If inode and offset matches, reference counter offset *must*
461 * match as well. Though, there is one exception: If user is
462 * replacing old trace_uprobe with new one(same group/event),
463 * then we allow same uprobe with new reference counter as far
464 * as the new one does not conflict with any other existing
465 * uprobe.
466 */
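/*
 * Illustrative example: the "(REF)" part of a command is typically an SDT
 * semaphore offset, e.g.
 *   p:sdt_libfoo/ev /usr/lib/libfoo.so:0x5320(0x8a28)
 * where 0x8a28 is the file offset of a counter that is incremented while the
 * probe is enabled; the check below rejects a second probe at the same
 * inode:offset that names a different reference counter offset.
 */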
467 static int validate_ref_ctr_offset(struct trace_uprobe *new)
469 struct dyn_event *pos;
470 struct trace_uprobe *tmp;
471 struct inode *new_inode = d_real_inode(new->path.dentry);
473 for_each_trace_uprobe(tmp, pos) {
474 if (new_inode == d_real_inode(tmp->path.dentry) &&
475 new->offset == tmp->offset &&
476 new->ref_ctr_offset != tmp->ref_ctr_offset) {
477 pr_warn("Reference counter offset mismatch.");
484 /* Register a trace_uprobe and probe_event */
485 static int register_trace_uprobe(struct trace_uprobe *tu)
487 struct trace_uprobe *old_tu;
490 mutex_lock(&event_mutex);
492 ret = validate_ref_ctr_offset(tu);
496 /* register as an event */
497 old_tu = find_probe_event(trace_probe_name(&tu->tp),
498 trace_probe_group_name(&tu->tp));
500 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
501 trace_probe_log_set_index(0);
502 trace_probe_log_err(0, DIFF_PROBE_TYPE);
505 ret = append_trace_uprobe(tu, old_tu);
510 ret = register_uprobe_event(tu);
512 if (ret == -EEXIST) {
513 trace_probe_log_set_index(0);
514 trace_probe_log_err(0, EVENT_EXIST);
516 pr_warn("Failed to register probe event(%d)\n", ret);
520 dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
523 mutex_unlock(&event_mutex);
528 /*
529 * Argument syntax:
530 * - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
531 */
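/*
 * For example (illustrative offsets, in the style of
 * Documentation/trace/uprobetracer.rst):
 *   echo 'p:zfree /bin/zsh:0x446420 %ip %ax' >> uprobe_events
 *   echo 'r:zfree_ret /bin/zsh:0x446420 $retval' >> uprobe_events
 */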
532 static int __trace_uprobe_create(int argc, const char **argv)
534 struct trace_uprobe *tu;
535 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
536 char *arg, *filename, *rctr, *rctr_end, *tmp;
537 char buf[MAX_EVENT_NAME_LEN];
538 char gbuf[MAX_EVENT_NAME_LEN];
539 enum probe_print_type ptype;
541 unsigned long offset, ref_ctr_offset;
542 bool is_return = false;
547 switch (argv[0][0]) {
560 if (argv[0][1] == ':')
563 if (!strchr(argv[1], '/'))
566 filename = kstrdup(argv[1], GFP_KERNEL);
570 /* Find the last occurrence, in case the path contains ':' too. */
571 arg = strrchr(filename, ':');
572 if (!arg || !isdigit(arg[1])) {
577 trace_probe_log_init("trace_uprobe", argc, argv);
578 trace_probe_log_set_index(1); /* filename is the 2nd argument */
581 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
583 trace_probe_log_err(0, FILE_NOT_FOUND);
585 trace_probe_log_clear();
588 if (!d_is_reg(path.dentry)) {
589 trace_probe_log_err(0, NO_REGULAR_FILE);
591 goto fail_address_parse;
594 /* Parse reference counter offset if specified. */
595 rctr = strchr(arg, '(');
596 if (rctr) {
597 rctr_end = strchr(rctr, ')');
598 if (!rctr_end) {
599 ret = -EINVAL;
600 rctr_end = rctr + strlen(rctr);
601 trace_probe_log_err(rctr_end - filename,
602 REFCNT_OPEN_BRACE);
603 goto fail_address_parse;
604 } else if (rctr_end[1] != '\0') {
605 ret = -EINVAL;
606 trace_probe_log_err(rctr_end + 1 - filename,
607 BAD_REFCNT_SUFFIX);
608 goto fail_address_parse;
609 }
611 *rctr++ = '\0';
612 *rctr_end = '\0';
613 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
614 if (ret) {
615 trace_probe_log_err(rctr - filename, BAD_REFCNT);
616 goto fail_address_parse;
617 }
618 }
620 /* Check if there is %return suffix */
621 tmp = strchr(arg, '%');
622 if (tmp) {
623 if (!strcmp(tmp, "%return")) {
624 *tmp = '\0';
625 is_return = true;
626 } else {
627 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
628 ret = -EINVAL;
629 goto fail_address_parse;
630 }
631 }
633 /* Parse uprobe offset. */
634 ret = kstrtoul(arg, 0, &offset);
636 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
637 goto fail_address_parse;
641 trace_probe_log_set_index(0);
642 if (event) {
643 ret = traceprobe_parse_event_name(&event, &group, gbuf,
644 event - argv[0]);
645 if (ret)
646 goto fail_address_parse;
647 }
653 tail = kstrdup(kbasename(filename), GFP_KERNEL);
654 if (!tail) {
655 ret = -ENOMEM;
656 goto fail_address_parse;
657 }
659 ptr = strpbrk(tail, ".-_");
663 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
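/*
 * Illustrative result: for "p /bin/bash:0x4245c0" with no EVENT given, tail
 * is "bash" (the basename truncated at the first '.', '-' or '_'), so the
 * generated event name becomes "p_bash_0x4245c0".
 */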
671 tu = alloc_trace_uprobe(group, event, argc, is_return);
672 if (IS_ERR(tu)) {
673 ret = PTR_ERR(tu);
674 /* This must return -ENOMEM otherwise there is a bug */
675 WARN_ON_ONCE(ret != -ENOMEM);
676 goto fail_address_parse;
677 }
678 tu->offset = offset;
679 tu->ref_ctr_offset = ref_ctr_offset;
680 tu->path = path;
681 tu->filename = filename;
683 /* parse arguments */
684 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
685 struct traceprobe_parse_context ctx = {
686 .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
689 trace_probe_log_set_index(i + 2);
690 ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
691 traceprobe_finish_parse(&ctx);
696 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
697 ret = traceprobe_set_print_fmt(&tu->tp, ptype);
701 ret = register_trace_uprobe(tu);
706 free_trace_uprobe(tu);
708 trace_probe_log_clear();
712 trace_probe_log_clear();
719 int trace_uprobe_create(const char *raw_command)
721 return trace_probe_create(raw_command, __trace_uprobe_create);
724 static int create_or_delete_trace_uprobe(const char *raw_command)
728 if (raw_command[0] == '-')
729 return dyn_event_release(raw_command, &trace_uprobe_ops);
731 ret = trace_uprobe_create(raw_command);
732 return ret == -ECANCELED ? -EINVAL : ret;
735 static int trace_uprobe_release(struct dyn_event *ev)
737 struct trace_uprobe *tu = to_trace_uprobe(ev);
739 return unregister_trace_uprobe(tu);
742 /* Probes listing interfaces */
743 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
745 struct trace_uprobe *tu = to_trace_uprobe(ev);
746 char c = is_ret_probe(tu) ? 'r' : 'p';
749 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
750 trace_probe_name(&tu->tp), tu->filename,
751 (int)(sizeof(void *) * 2), tu->offset);
753 if (tu->ref_ctr_offset)
754 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
756 for (i = 0; i < tu->tp.nr_args; i++)
757 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
763 static int probes_seq_show(struct seq_file *m, void *v)
765 struct dyn_event *ev = v;
767 if (!is_trace_uprobe(ev))
770 return trace_uprobe_show(m, ev);
773 static const struct seq_operations probes_seq_op = {
774 .start = dyn_event_seq_start,
775 .next = dyn_event_seq_next,
776 .stop = dyn_event_seq_stop,
777 .show = probes_seq_show
780 static int probes_open(struct inode *inode, struct file *file)
784 ret = security_locked_down(LOCKDOWN_TRACEFS);
788 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
789 ret = dyn_events_release_all(&trace_uprobe_ops);
794 return seq_open(file, &probes_seq_op);
797 static ssize_t probes_write(struct file *file, const char __user *buffer,
798 size_t count, loff_t *ppos)
800 return trace_parse_run_command(file, buffer, count, ppos,
801 create_or_delete_trace_uprobe);
804 static const struct file_operations uprobe_events_ops = {
805 .owner = THIS_MODULE,
806 .open = probes_open,
807 .read = seq_read,
808 .llseek = seq_lseek,
809 .release = seq_release,
810 .write = probes_write,
811 };
813 /* Probes profiling interfaces */
814 static int probes_profile_seq_show(struct seq_file *m, void *v)
816 struct dyn_event *ev = v;
817 struct trace_uprobe *tu;
819 if (!is_trace_uprobe(ev))
822 tu = to_trace_uprobe(ev);
823 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
824 trace_probe_name(&tu->tp), tu->nhit);
828 static const struct seq_operations profile_seq_op = {
829 .start = dyn_event_seq_start,
830 .next = dyn_event_seq_next,
831 .stop = dyn_event_seq_stop,
832 .show = probes_profile_seq_show
835 static int profile_open(struct inode *inode, struct file *file)
839 ret = security_locked_down(LOCKDOWN_TRACEFS);
843 return seq_open(file, &profile_seq_op);
846 static const struct file_operations uprobe_profile_ops = {
847 .owner = THIS_MODULE,
848 .open = profile_open,
849 .read = seq_read,
850 .llseek = seq_lseek,
851 .release = seq_release,
852 };
854 struct uprobe_cpu_buffer {
855 struct mutex mutex;
856 void *buf;
857 int dsize;
858 };
859 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
860 static int uprobe_buffer_refcnt;
862 static int uprobe_buffer_init(void)
866 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
867 if (uprobe_cpu_buffer == NULL)
870 for_each_possible_cpu(cpu) {
871 struct page *p = alloc_pages_node(cpu_to_node(cpu),
877 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
878 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
884 for_each_possible_cpu(cpu) {
887 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
890 free_percpu(uprobe_cpu_buffer);
894 static int uprobe_buffer_enable(void)
898 BUG_ON(!mutex_is_locked(&event_mutex));
900 if (uprobe_buffer_refcnt++ == 0) {
901 ret = uprobe_buffer_init();
903 uprobe_buffer_refcnt--;
909 static void uprobe_buffer_disable(void)
913 BUG_ON(!mutex_is_locked(&event_mutex));
915 if (--uprobe_buffer_refcnt == 0) {
916 for_each_possible_cpu(cpu)
917 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
920 free_percpu(uprobe_cpu_buffer);
921 uprobe_cpu_buffer = NULL;
925 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
927 struct uprobe_cpu_buffer *ucb;
930 cpu = raw_smp_processor_id();
931 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
934 * Use per-cpu buffers for fastest access, but we might migrate
935 * so the mutex makes sure we have sole access to it.
937 mutex_lock(&ucb->mutex);
942 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
946 mutex_unlock(&ucb->mutex);
949 static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
950 struct pt_regs *regs,
951 struct uprobe_cpu_buffer **ucbp)
953 struct uprobe_cpu_buffer *ucb;
959 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
960 dsize = __get_data_size(&tu->tp, regs, NULL);
962 ucb = uprobe_buffer_get();
963 ucb->dsize = tu->tp.size + dsize;
965 store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);
971 static void __uprobe_trace_func(struct trace_uprobe *tu,
972 unsigned long func, struct pt_regs *regs,
973 struct uprobe_cpu_buffer **ucbp,
974 struct trace_event_file *trace_file)
976 struct uprobe_trace_entry_head *entry;
977 struct trace_event_buffer fbuffer;
978 struct uprobe_cpu_buffer *ucb;
981 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
983 WARN_ON(call != trace_file->event_call);
985 ucb = prepare_uprobe_buffer(tu, regs, ucbp);
986 if (WARN_ON_ONCE(ucb->dsize > PAGE_SIZE))
989 if (trace_trigger_soft_disabled(trace_file))
992 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
993 size = esize + ucb->dsize;
994 entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
998 if (is_ret_probe(tu)) {
999 entry->vaddr[0] = func;
1000 entry->vaddr[1] = instruction_pointer(regs);
1001 data = DATAOF_TRACE_ENTRY(entry, true);
1003 entry->vaddr[0] = instruction_pointer(regs);
1004 data = DATAOF_TRACE_ENTRY(entry, false);
1007 memcpy(data, ucb->buf, ucb->dsize);
1009 trace_event_buffer_commit(&fbuffer);
1012 /* uprobe handler */
1013 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
1014 struct uprobe_cpu_buffer **ucbp)
1016 struct event_file_link *link;
1018 if (is_ret_probe(tu))
1022 trace_probe_for_each_link_rcu(link, &tu->tp)
1023 __uprobe_trace_func(tu, 0, regs, ucbp, link->file);
1029 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1030 struct pt_regs *regs,
1031 struct uprobe_cpu_buffer **ucbp)
1033 struct event_file_link *link;
1036 trace_probe_for_each_link_rcu(link, &tu->tp)
1037 __uprobe_trace_func(tu, func, regs, ucbp, link->file);
1041 /* Event entry printers */
1042 static enum print_line_t
1043 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1045 struct uprobe_trace_entry_head *entry;
1046 struct trace_seq *s = &iter->seq;
1047 struct trace_uprobe *tu;
1050 entry = (struct uprobe_trace_entry_head *)iter->ent;
1051 tu = trace_uprobe_primary_from_call(
1052 container_of(event, struct trace_event_call, event));
1056 if (is_ret_probe(tu)) {
1057 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1058 trace_probe_name(&tu->tp),
1059 entry->vaddr[1], entry->vaddr[0]);
1060 data = DATAOF_TRACE_ENTRY(entry, true);
1062 trace_seq_printf(s, "%s: (0x%lx)",
1063 trace_probe_name(&tu->tp),
1065 data = DATAOF_TRACE_ENTRY(entry, false);
1068 if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1071 trace_seq_putc(s, '\n');
1074 return trace_handle_return(s);
1077 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1078 enum uprobe_filter_ctx ctx,
1079 struct mm_struct *mm);
1081 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1085 tu->consumer.filter = filter;
1086 tu->inode = d_real_inode(tu->path.dentry);
1088 if (tu->ref_ctr_offset)
1089 ret = uprobe_register_refctr(tu->inode, tu->offset,
1090 tu->ref_ctr_offset, &tu->consumer);
1092 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1100 static void __probe_event_disable(struct trace_probe *tp)
1102 struct trace_uprobe *tu;
1104 tu = container_of(tp, struct trace_uprobe, tp);
1105 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1107 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1111 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1116 static int probe_event_enable(struct trace_event_call *call,
1117 struct trace_event_file *file, filter_func_t filter)
1119 struct trace_probe *tp;
1120 struct trace_uprobe *tu;
1124 tp = trace_probe_primary_from_call(call);
1125 if (WARN_ON_ONCE(!tp))
1127 enabled = trace_probe_is_enabled(tp);
1129 /* This may also change "enabled" state */
1131 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1134 ret = trace_probe_add_file(tp, file);
1138 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1141 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1144 tu = container_of(tp, struct trace_uprobe, tp);
1145 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1150 ret = uprobe_buffer_enable();
1154 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1155 ret = trace_uprobe_enable(tu, filter);
1157 __probe_event_disable(tp);
1165 uprobe_buffer_disable();
1169 trace_probe_remove_file(tp, file);
1171 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1176 static void probe_event_disable(struct trace_event_call *call,
1177 struct trace_event_file *file)
1179 struct trace_probe *tp;
1181 tp = trace_probe_primary_from_call(call);
1182 if (WARN_ON_ONCE(!tp))
1185 if (!trace_probe_is_enabled(tp))
1189 if (trace_probe_remove_file(tp, file) < 0)
1192 if (trace_probe_is_enabled(tp))
1195 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1197 __probe_event_disable(tp);
1198 uprobe_buffer_disable();
1201 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1204 struct uprobe_trace_entry_head field;
1205 struct trace_uprobe *tu;
1207 tu = trace_uprobe_primary_from_call(event_call);
1211 if (is_ret_probe(tu)) {
1212 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1213 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1214 size = SIZEOF_TRACE_ENTRY(true);
1216 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1217 size = SIZEOF_TRACE_ENTRY(false);
1220 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1223 #ifdef CONFIG_PERF_EVENTS
1225 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1227 struct perf_event *event;
1229 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1230 if (event->hw.target->mm == mm)
1238 trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1239 struct perf_event *event)
1241 return __uprobe_perf_filter(filter, event->hw.target->mm);
1244 static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1245 struct perf_event *event)
1249 write_lock(&filter->rwlock);
1250 if (event->hw.target) {
1251 list_del(&event->hw.tp_list);
1252 done = filter->nr_systemwide ||
1253 (event->hw.target->flags & PF_EXITING) ||
1254 trace_uprobe_filter_event(filter, event);
1256 filter->nr_systemwide--;
1257 done = filter->nr_systemwide;
1259 write_unlock(&filter->rwlock);
1264 /* This returns true if the filter always covers target mm */
1265 static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1266 struct perf_event *event)
1270 write_lock(&filter->rwlock);
1271 if (event->hw.target) {
1273 * event->parent != NULL means copy_process(), we can avoid
1274 * uprobe_apply(). current->mm must be probed and we can rely
1275 * on dup_mmap() which preserves the already installed bp's.
1277 * attr.enable_on_exec means that exec/mmap will install the
1278 * breakpoints we need.
1280 done = filter->nr_systemwide ||
1281 event->parent || event->attr.enable_on_exec ||
1282 trace_uprobe_filter_event(filter, event);
1283 list_add(&event->hw.tp_list, &filter->perf_events);
1285 done = filter->nr_systemwide;
1286 filter->nr_systemwide++;
1288 write_unlock(&filter->rwlock);
1293 static int uprobe_perf_close(struct trace_event_call *call,
1294 struct perf_event *event)
1296 struct trace_probe *tp;
1297 struct trace_uprobe *tu;
1300 tp = trace_probe_primary_from_call(call);
1301 if (WARN_ON_ONCE(!tp))
1304 tu = container_of(tp, struct trace_uprobe, tp);
1305 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1308 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1309 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1317 static int uprobe_perf_open(struct trace_event_call *call,
1318 struct perf_event *event)
1320 struct trace_probe *tp;
1321 struct trace_uprobe *tu;
1324 tp = trace_probe_primary_from_call(call);
1325 if (WARN_ON_ONCE(!tp))
1328 tu = container_of(tp, struct trace_uprobe, tp);
1329 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1332 list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1333 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1335 uprobe_perf_close(call, event);
1343 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1344 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1346 struct trace_uprobe_filter *filter;
1347 struct trace_uprobe *tu;
1350 tu = container_of(uc, struct trace_uprobe, consumer);
1351 filter = tu->tp.event->filter;
1354 * speculative short-circuiting check to avoid unnecessarily taking
1355 * filter->rwlock below, if the uprobe has system-wide consumer
1357 if (READ_ONCE(filter->nr_systemwide))
1360 read_lock(&filter->rwlock);
1361 ret = __uprobe_perf_filter(filter, mm);
1362 read_unlock(&filter->rwlock);
1367 static void __uprobe_perf_func(struct trace_uprobe *tu,
1368 unsigned long func, struct pt_regs *regs,
1369 struct uprobe_cpu_buffer **ucbp)
1371 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1372 struct uprobe_trace_entry_head *entry;
1373 struct uprobe_cpu_buffer *ucb;
1374 struct hlist_head *head;
1379 #ifdef CONFIG_BPF_EVENTS
1380 if (bpf_prog_array_valid(call)) {
1381 u32 ret;
1383 ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
1384 if (!ret)
1385 return;
1386 }
1387 #endif /* CONFIG_BPF_EVENTS */
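/*
 * Note (sketch of the behaviour above): a BPF program attached to this event
 * via perf can return 0 from the program-array run to filter the hit out, in
 * which case the function bails out before anything is written to the perf
 * buffer.
 */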
1389 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1391 ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1392 size = esize + ucb->dsize;
1393 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
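/*
 * Worked example (illustrative): with esize == 16 and ucb->dsize == 18, size
 * starts at 34 and ALIGN(34 + sizeof(u32), sizeof(u64)) - sizeof(u32) yields
 * 36, i.e. the raw record is padded so that it stays u64-aligned once the
 * perf layer prepends its own u32 size field.
 */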
1394 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1398 head = this_cpu_ptr(call->perf_events);
1399 if (hlist_empty(head))
1402 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1406 if (is_ret_probe(tu)) {
1407 entry->vaddr[0] = func;
1408 entry->vaddr[1] = instruction_pointer(regs);
1409 data = DATAOF_TRACE_ENTRY(entry, true);
1411 entry->vaddr[0] = instruction_pointer(regs);
1412 data = DATAOF_TRACE_ENTRY(entry, false);
1415 memcpy(data, ucb->buf, ucb->dsize);
1417 if (size - esize > ucb->dsize)
1418 memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
1420 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1426 /* uprobe profile handler */
1427 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1428 struct uprobe_cpu_buffer **ucbp)
1430 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1431 return UPROBE_HANDLER_REMOVE;
1433 if (!is_ret_probe(tu))
1434 __uprobe_perf_func(tu, 0, regs, ucbp);
1438 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1439 struct pt_regs *regs,
1440 struct uprobe_cpu_buffer **ucbp)
1442 __uprobe_perf_func(tu, func, regs, ucbp);
1445 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1446 const char **filename, u64 *probe_offset,
1447 u64 *probe_addr, bool perf_type_tracepoint)
1449 const char *pevent = trace_event_name(event->tp_event);
1450 const char *group = event->tp_event->class->system;
1451 struct trace_uprobe *tu;
1453 if (perf_type_tracepoint)
1454 tu = find_probe_event(pevent, group);
1456 tu = trace_uprobe_primary_from_call(event->tp_event);
1460 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1461 : BPF_FD_TYPE_UPROBE;
1462 *filename = tu->filename;
1463 *probe_offset = tu->offset;
1467 #endif /* CONFIG_PERF_EVENTS */
1470 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1473 struct trace_event_file *file = data;
1476 case TRACE_REG_REGISTER:
1477 return probe_event_enable(event, file, NULL);
1479 case TRACE_REG_UNREGISTER:
1480 probe_event_disable(event, file);
1483 #ifdef CONFIG_PERF_EVENTS
1484 case TRACE_REG_PERF_REGISTER:
1485 return probe_event_enable(event, NULL, uprobe_perf_filter);
1487 case TRACE_REG_PERF_UNREGISTER:
1488 probe_event_disable(event, NULL);
1491 case TRACE_REG_PERF_OPEN:
1492 return uprobe_perf_open(event, data);
1494 case TRACE_REG_PERF_CLOSE:
1495 return uprobe_perf_close(event, data);
1503 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1505 struct trace_uprobe *tu;
1506 struct uprobe_dispatch_data udd;
1507 struct uprobe_cpu_buffer *ucb = NULL;
1510 tu = container_of(con, struct trace_uprobe, consumer);
1514 udd.bp_addr = instruction_pointer(regs);
1516 current->utask->vaddr = (unsigned long) &udd;
1518 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1521 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1522 ret |= uprobe_trace_func(tu, regs, &ucb);
1524 #ifdef CONFIG_PERF_EVENTS
1525 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1526 ret |= uprobe_perf_func(tu, regs, &ucb);
1528 uprobe_buffer_put(ucb);
1532 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1533 unsigned long func, struct pt_regs *regs)
1535 struct trace_uprobe *tu;
1536 struct uprobe_dispatch_data udd;
1537 struct uprobe_cpu_buffer *ucb = NULL;
1539 tu = container_of(con, struct trace_uprobe, consumer);
1544 current->utask->vaddr = (unsigned long) &udd;
1546 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1549 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1550 uretprobe_trace_func(tu, func, regs, &ucb);
1552 #ifdef CONFIG_PERF_EVENTS
1553 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1554 uretprobe_perf_func(tu, func, regs, &ucb);
1556 uprobe_buffer_put(ucb);
1560 static struct trace_event_functions uprobe_funcs = {
1561 .trace = print_uprobe_event
1564 static struct trace_event_fields uprobe_fields_array[] = {
1565 { .type = TRACE_FUNCTION_TYPE,
1566 .define_fields = uprobe_event_define_fields },
1570 static inline void init_trace_event_call(struct trace_uprobe *tu)
1572 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1573 call->event.funcs = &uprobe_funcs;
1574 call->class->fields_array = uprobe_fields_array;
1576 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1577 call->class->reg = trace_uprobe_register;
1580 static int register_uprobe_event(struct trace_uprobe *tu)
1582 init_trace_event_call(tu);
1584 return trace_probe_register_event_call(&tu->tp);
1587 static int unregister_uprobe_event(struct trace_uprobe *tu)
1589 return trace_probe_unregister_event_call(&tu->tp);
1592 #ifdef CONFIG_PERF_EVENTS
1593 struct trace_event_call *
1594 create_local_trace_uprobe(char *name, unsigned long offs,
1595 unsigned long ref_ctr_offset, bool is_return)
1597 enum probe_print_type ptype;
1598 struct trace_uprobe *tu;
1602 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1604 return ERR_PTR(ret);
1606 if (!d_is_reg(path.dentry)) {
1608 return ERR_PTR(-EINVAL);
1611 /*
1612 * local trace_uprobes are not added to dyn_event, so they are never
1613 * searched in find_probe_event(). Therefore, there is no concern of
1614 * a duplicated name "DUMMY_EVENT" here.
1615 */
1616 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1617 is_return);
1619 if (IS_ERR(tu)) {
1620 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1621 (int)PTR_ERR(tu));
1622 path_put(&path);
1623 return ERR_CAST(tu);
1624 }
1626 tu->offset = offs;
1627 tu->path = path;
1628 tu->ref_ctr_offset = ref_ctr_offset;
1629 tu->filename = kstrdup(name, GFP_KERNEL);
1630 if (!tu->filename) {
1635 init_trace_event_call(tu);
1637 ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1638 if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1643 return trace_probe_event_call(&tu->tp);
1645 free_trace_uprobe(tu);
1646 return ERR_PTR(ret);
1649 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1651 struct trace_uprobe *tu;
1653 tu = trace_uprobe_primary_from_call(event_call);
1655 free_trace_uprobe(tu);
1657 #endif /* CONFIG_PERF_EVENTS */
1659 /* Make a trace interface for controlling probe points */
1660 static __init int init_uprobe_trace(void)
1664 ret = dyn_event_register(&trace_uprobe_ops);
1668 ret = tracing_init_dentry();
1672 trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1673 NULL, &uprobe_events_ops);
1674 /* Profile interface */
1675 trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1676 NULL, &uprobe_profile_ops);
1680 fs_initcall(init_uprobe_trace);