kernel/trace/trace_uprobe.c
// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:      Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)     "trace_uprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/filter.h>
#include <linux/percpu.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM     "uprobes"

struct uprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned long           vaddr[];
};

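/*
 * A trace entry is the fixed head above followed by one recorded
 * address for a normal probe (the probed IP) or two for a return
 * probe (the probed function and the address returned to), and then
 * the fetched argument data.
 */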
#define SIZEOF_TRACE_ENTRY(is_return)                   \
        (sizeof(struct uprobe_trace_entry_head) +       \
         sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)            \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

static int trace_uprobe_create(const char *raw_command);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
        .create = trace_uprobe_create,
        .show = trace_uprobe_show,
        .is_busy = trace_uprobe_is_busy,
        .free = trace_uprobe_release,
        .match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
        struct dyn_event                devent;
        struct uprobe_consumer          consumer;
        struct path                     path;
        char                            *filename;
        struct uprobe                   *uprobe;
        unsigned long                   offset;
        unsigned long                   ref_ctr_offset;
        unsigned long __percpu          *nhits;
        struct trace_probe              tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:        the struct trace_uprobe * for each entry
 * @dpos:       the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
                             __u64 *data);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
                                unsigned long func, struct pt_regs *regs,
                                __u64 *data);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
        return addr + (n * sizeof(long));
}
#endif

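/* Fetch the n-th word from the user stack, or 0 if it cannot be read. */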
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
        unsigned long ret;
        unsigned long addr = user_stack_pointer(regs);

        addr = adjust_stack_addr(addr, n);

        if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
                return 0;

        return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
        void __user *vaddr = (void __force __user *)src;

        return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
        return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
        long ret;
        u32 loc = *(u32 *)dest;
        int maxlen = get_loc_len(loc);
        u8 *dst = get_loc_data(dest, base);
        void __user *src = (void __force __user *) addr;

        if (unlikely(!maxlen))
                return -ENOMEM;

        if (addr == FETCH_TOKEN_COMM)
                ret = strscpy(dst, current->comm, maxlen);
        else
                ret = strncpy_from_user(dst, src, maxlen);
        if (ret >= 0) {
                if (ret == maxlen)
                        dst[ret - 1] = '\0';
                else
                        /*
                         * Include the terminating null byte. In this case it
                         * was copied by strncpy_from_user but not accounted
                         * for in ret.
                         */
                        ret++;
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
        } else
                *(u32 *)dest = make_data_loc(0, (void *)dst - base);

        return ret;
}

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
        return fetch_store_string(addr, dest, base);
}

/* Return the length of the string, including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        int len;
        void __user *vaddr = (void __force __user *) addr;

        if (addr == FETCH_TOKEN_COMM)
                len = strlen(current->comm) + 1;
        else
                len = strnlen_user(vaddr, MAX_STRING_SIZE);

        return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
        return fetch_store_strlen(addr);
}

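/*
 * Translate a file offset in the probed binary into a virtual address
 * in the current task, using the breakpoint address and probe offset
 * recorded by the dispatcher in current->utask->vaddr.
 */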
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
        unsigned long base_addr;
        struct uprobe_dispatch_data *udd;

        udd = (void *) current->utask->vaddr;

        base_addr = udd->bp_addr - udd->tu->offset;
        return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
                   void *dest, void *base)
{
        struct pt_regs *regs = rec;
        unsigned long val;
        int ret;

        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = get_user_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = user_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_COMM:
                val = FETCH_TOKEN_COMM;
                break;
        case FETCH_OP_FOFFS:
                val = translate_user_vaddr(code->immediate);
                break;
        default:
                ret = process_common_fetch_insn(code, &val);
                if (ret < 0)
                        return ret;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
        rwlock_init(&filter->rwlock);
        filter->nr_systemwide = 0;
        INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
        return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
        return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return trace_probe_is_enabled(&tu->tp);
}

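/*
 * Match the "FILENAME:OFFSET" (with an optional "(REF_CTR_OFFSET)"
 * suffix) in argv[0] against this probe, then match the remaining
 * arguments.
 */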
static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
                                            int argc, const char **argv)
{
        char buf[MAX_ARGSTR_LEN + 1];
        int len;

        if (!argc)
                return true;

        len = strlen(tu->filename);
        if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
                return false;

        if (tu->ref_ctr_offset == 0)
                snprintf(buf, sizeof(buf), "0x%0*lx",
                                (int)(sizeof(void *) * 2), tu->offset);
        else
                snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
                                (int)(sizeof(void *) * 2), tu->offset,
                                tu->ref_ctr_offset);
        if (strcmp(buf, &argv[0][len + 1]))
                return false;

        argc--; argv++;

        return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return (event[0] == '\0' ||
                strcmp(trace_probe_name(&tu->tp), event) == 0) &&
           (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
           trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return NULL;

        return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
        struct trace_uprobe *tu;
        int ret;

        tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
        if (!tu)
                return ERR_PTR(-ENOMEM);

        tu->nhits = alloc_percpu(unsigned long);
        if (!tu->nhits) {
                ret = -ENOMEM;
                goto error;
        }

        ret = trace_probe_init(&tu->tp, event, group, true, nargs);
        if (ret < 0)
                goto error;

        dyn_event_init(&tu->devent, &trace_uprobe_ops);
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
        init_trace_uprobe_filter(tu->tp.event->filter);
        return tu;

error:
        free_percpu(tu->nhits);
        kfree(tu);

        return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
        if (!tu)
                return;

        path_put(&tu->path);
        trace_probe_cleanup(&tu->tp);
        kfree(tu->filename);
        free_percpu(tu->nhits);
        kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
        struct dyn_event *pos;
        struct trace_uprobe *tu;

        for_each_trace_uprobe(tu, pos)
                if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
                    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
                        return tu;

        return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
        int ret;

        if (trace_probe_has_sibling(&tu->tp))
                goto unreg;

        /* If there's a reference to the dynamic event */
        if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
                return -EBUSY;

        ret = unregister_uprobe_event(tu);
        if (ret)
                return ret;

unreg:
        dyn_event_remove(&tu->devent);
        trace_probe_unlink(&tu->tp);
        free_trace_uprobe(tu);
        return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
                                         struct trace_uprobe *comp)
{
        struct trace_probe_event *tpe = orig->tp.event;
        struct inode *comp_inode = d_real_inode(comp->path.dentry);
        int i;

        list_for_each_entry(orig, &tpe->probes, tp.list) {
                if (comp_inode != d_real_inode(orig->path.dentry) ||
                    comp->offset != orig->offset)
                        continue;

                /*
                 * trace_probe_compare_arg_type() ensured that nr_args and
                 * each argument name and type are the same. Let's compare comm.
                 */
                for (i = 0; i < orig->tp.nr_args; i++) {
                        if (strcmp(orig->tp.args[i].comm,
                                   comp->tp.args[i].comm))
                                break;
                }

                if (i == orig->tp.nr_args)
                        return true;
        }

        return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
        int ret;

        ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
        if (ret) {
                /* Note that arguments start at index 2 */
                trace_probe_log_set_index(ret + 1);
                trace_probe_log_err(0, DIFF_ARG_TYPE);
                return -EEXIST;
        }
        if (trace_uprobe_has_same_uprobe(to, tu)) {
                trace_probe_log_set_index(0);
                trace_probe_log_err(0, SAME_PROBE);
                return -EEXIST;
        }

        /* Append to existing event */
        ret = trace_probe_append(&tu->tp, &to->tp);
        if (!ret)
                dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

        return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if
 * inode and offset match, the reference counter offset *must* match
 * as well. There is one exception, though: if the user is replacing
 * an old trace_uprobe with a new one (same group/event), then we
 * allow the same uprobe with a new reference counter, as long as the
 * new one does not conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
        struct dyn_event *pos;
        struct trace_uprobe *tmp;
        struct inode *new_inode = d_real_inode(new->path.dentry);

        for_each_trace_uprobe(tmp, pos) {
                if (new_inode == d_real_inode(tmp->path.dentry) &&
                    new->offset == tmp->offset &&
                    new->ref_ctr_offset != tmp->ref_ctr_offset) {
                        pr_warn("Reference counter offset mismatch.");
                        return -EINVAL;
                }
        }
        return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
        struct trace_uprobe *old_tu;
        int ret;

        guard(mutex)(&event_mutex);

        ret = validate_ref_ctr_offset(tu);
        if (ret)
                return ret;

        /* register as an event */
        old_tu = find_probe_event(trace_probe_name(&tu->tp),
                                  trace_probe_group_name(&tu->tp));
        if (old_tu) {
                if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
                        trace_probe_log_set_index(0);
                        trace_probe_log_err(0, DIFF_PROBE_TYPE);
                        return -EEXIST;
                }
                return append_trace_uprobe(tu, old_tu);
        }

        ret = register_uprobe_event(tu);
        if (ret) {
                if (ret == -EEXIST) {
                        trace_probe_log_set_index(0);
                        trace_probe_log_err(0, EVENT_EXIST);
                } else
                        pr_warn("Failed to register probe event(%d)\n", ret);
                return ret;
        }

        dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

        return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
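/*
 * For example (all values here are illustrative only):
 *   p:mygrp/myevent /bin/bash:0x4245c0(0x12345) %ax
 * puts a probe at offset 0x4245c0 in /bin/bash, with a reference
 * counter at offset 0x12345, recording the ax register.
 */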
static int __trace_uprobe_create(int argc, const char **argv)
{
        struct trace_uprobe *tu;
        const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
        char *arg, *filename, *rctr, *rctr_end, *tmp;
        char buf[MAX_EVENT_NAME_LEN];
        char gbuf[MAX_EVENT_NAME_LEN];
        enum probe_print_type ptype;
        struct path path;
        unsigned long offset, ref_ctr_offset;
        bool is_return = false;
        int i, ret;

        ref_ctr_offset = 0;

        switch (argv[0][0]) {
        case 'r':
                is_return = true;
                break;
        case 'p':
                break;
        default:
                return -ECANCELED;
        }

        if (argc < 2)
                return -ECANCELED;

        trace_probe_log_init("trace_uprobe", argc, argv);

        if (argc - 2 > MAX_TRACE_ARGS) {
                trace_probe_log_set_index(2);
                trace_probe_log_err(0, TOO_MANY_ARGS);
                return -E2BIG;
        }

        if (argv[0][1] == ':')
                event = &argv[0][2];

        if (!strchr(argv[1], '/'))
                return -ECANCELED;

        filename = kstrdup(argv[1], GFP_KERNEL);
        if (!filename)
                return -ENOMEM;

        /* Find the last occurrence, in case the path contains ':' too. */
        arg = strrchr(filename, ':');
        if (!arg || !isdigit(arg[1])) {
                kfree(filename);
                return -ECANCELED;
        }

        trace_probe_log_set_index(1);   /* filename is the 2nd argument */

        *arg++ = '\0';
        ret = kern_path(filename, LOOKUP_FOLLOW, &path);
        if (ret) {
                trace_probe_log_err(0, FILE_NOT_FOUND);
                kfree(filename);
                trace_probe_log_clear();
                return ret;
        }
        if (!d_is_reg(path.dentry)) {
                trace_probe_log_err(0, NO_REGULAR_FILE);
                ret = -EINVAL;
                goto fail_address_parse;
        }

        /* Parse reference counter offset if specified. */
        rctr = strchr(arg, '(');
        if (rctr) {
                rctr_end = strchr(rctr, ')');
                if (!rctr_end) {
                        ret = -EINVAL;
                        rctr_end = rctr + strlen(rctr);
                        trace_probe_log_err(rctr_end - filename,
                                            REFCNT_OPEN_BRACE);
                        goto fail_address_parse;
                } else if (rctr_end[1] != '\0') {
                        ret = -EINVAL;
                        trace_probe_log_err(rctr_end + 1 - filename,
                                            BAD_REFCNT_SUFFIX);
                        goto fail_address_parse;
                }

                *rctr++ = '\0';
                *rctr_end = '\0';
                ret = kstrtoul(rctr, 0, &ref_ctr_offset);
                if (ret) {
                        trace_probe_log_err(rctr - filename, BAD_REFCNT);
                        goto fail_address_parse;
                }
        }

        /* Check if there is %return suffix */
        tmp = strchr(arg, '%');
        if (tmp) {
                if (!strcmp(tmp, "%return")) {
                        *tmp = '\0';
                        is_return = true;
                } else {
                        trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
                        ret = -EINVAL;
                        goto fail_address_parse;
                }
        }

        /* Parse uprobe offset. */
        ret = kstrtoul(arg, 0, &offset);
        if (ret) {
                trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
                goto fail_address_parse;
        }

        /* setup a probe */
        trace_probe_log_set_index(0);
        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, gbuf,
                                                  event - argv[0]);
                if (ret)
                        goto fail_address_parse;
        }

        if (!event) {
                char *tail;
                char *ptr;

                tail = kstrdup(kbasename(filename), GFP_KERNEL);
                if (!tail) {
                        ret = -ENOMEM;
                        goto fail_address_parse;
                }

                ptr = strpbrk(tail, ".-_");
                if (ptr)
                        *ptr = '\0';

                snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
                event = buf;
                kfree(tail);
        }

        argc -= 2;
        argv += 2;

        tu = alloc_trace_uprobe(group, event, argc, is_return);
        if (IS_ERR(tu)) {
                ret = PTR_ERR(tu);
                /* This must return -ENOMEM otherwise there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto fail_address_parse;
        }
        tu->offset = offset;
        tu->ref_ctr_offset = ref_ctr_offset;
        tu->path = path;
        tu->filename = filename;

        /* parse arguments */
        for (i = 0; i < argc; i++) {
                struct traceprobe_parse_context ctx = {
                        .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
                };

                trace_probe_log_set_index(i + 2);
                ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
                traceprobe_finish_parse(&ctx);
                if (ret)
                        goto error;
        }

        ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
        ret = traceprobe_set_print_fmt(&tu->tp, ptype);
        if (ret < 0)
                goto error;

        ret = register_trace_uprobe(tu);
        if (!ret)
                goto out;

error:
        free_trace_uprobe(tu);
out:
        trace_probe_log_clear();
        return ret;

fail_address_parse:
        trace_probe_log_clear();
        path_put(&path);
        kfree(filename);

        return ret;
}

int trace_uprobe_create(const char *raw_command)
{
        return trace_probe_create(raw_command, __trace_uprobe_create);
}

static int create_or_delete_trace_uprobe(const char *raw_command)
{
        int ret;

        if (raw_command[0] == '-')
                return dyn_event_release(raw_command, &trace_uprobe_ops);

        ret = dyn_event_create(raw_command, &trace_uprobe_ops);
        return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);

        return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_uprobe *tu = to_trace_uprobe(ev);
        char c = is_ret_probe(tu) ? 'r' : 'p';
        int i;

        seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
                        trace_probe_name(&tu->tp), tu->filename,
                        (int)(sizeof(void *) * 2), tu->offset);

        if (tu->ref_ctr_offset)
                seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

        for (i = 0; i < tu->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

        seq_putc(m, '\n');
        return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        if (!is_trace_uprobe(ev))
                return 0;

        return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_uprobe_ops);
                if (ret)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                        create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_uprobe *tu;
        unsigned long nhits;
        int cpu;

        if (!is_trace_uprobe(ev))
                return 0;

        tu = to_trace_uprobe(ev);

        nhits = 0;
        for_each_possible_cpu(cpu) {
                nhits += per_cpu(*tu->nhits, cpu);
        }

        seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
                   trace_probe_name(&tu->tp), nhits);
        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

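/*
 * Per-cpu scratch buffer (one page per CPU) used to assemble the
 * event payload before it is copied into the trace or perf buffers.
 * The mutex serializes access in case the task migrates mid-record.
 */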
struct uprobe_cpu_buffer {
        struct mutex mutex;
        void *buf;
        int dsize;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
#define MAX_UCB_BUFFER_SIZE PAGE_SIZE

static int uprobe_buffer_init(void)
{
        int cpu, err_cpu;

        uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
        if (uprobe_cpu_buffer == NULL)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct page *p = alloc_pages_node(cpu_to_node(cpu),
                                                  GFP_KERNEL, 0);
                if (p == NULL) {
                        err_cpu = cpu;
                        goto err;
                }
                per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
                mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
        }

        return 0;

err:
        for_each_possible_cpu(cpu) {
                if (cpu == err_cpu)
                        break;
                free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
        }

        free_percpu(uprobe_cpu_buffer);
        return -ENOMEM;
}

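/*
 * The buffers are refcounted: they are allocated when the first
 * uprobe event is enabled and freed when the last one is disabled.
 * Both paths run under event_mutex.
 */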
static int uprobe_buffer_enable(void)
{
        int ret = 0;

        BUG_ON(!mutex_is_locked(&event_mutex));

        if (uprobe_buffer_refcnt++ == 0) {
                ret = uprobe_buffer_init();
                if (ret < 0)
                        uprobe_buffer_refcnt--;
        }

        return ret;
}

static void uprobe_buffer_disable(void)
{
        int cpu;

        BUG_ON(!mutex_is_locked(&event_mutex));

        if (--uprobe_buffer_refcnt == 0) {
                for_each_possible_cpu(cpu)
                        free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
                                                             cpu)->buf);

                free_percpu(uprobe_cpu_buffer);
                uprobe_cpu_buffer = NULL;
        }
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
        struct uprobe_cpu_buffer *ucb;
        int cpu;

        cpu = raw_smp_processor_id();
        ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

        /*
         * Use per-cpu buffers for fastest access, but we might migrate
         * between CPUs, so the mutex makes sure we have sole access to it.
         */
        mutex_lock(&ucb->mutex);

        return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
        if (!ucb)
                return;
        mutex_unlock(&ucb->mutex);
}

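/*
 * Get a per-cpu buffer and fill it with the fetched argument data
 * once per probe hit; the result is cached in *ucbp so that the trace
 * and perf handlers for the same hit share one copy.
 */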
static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
                                                       struct pt_regs *regs,
                                                       struct uprobe_cpu_buffer **ucbp)
{
        struct uprobe_cpu_buffer *ucb;
        int dsize, esize;

        if (*ucbp)
                return *ucbp;

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
        dsize = __get_data_size(&tu->tp, regs, NULL);

        ucb = uprobe_buffer_get();
        ucb->dsize = tu->tp.size + dsize;

        if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
                ucb->dsize = MAX_UCB_BUFFER_SIZE;
                dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
        }

        store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);

        *ucbp = ucb;
        return ucb;
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
                                unsigned long func, struct pt_regs *regs,
                                struct uprobe_cpu_buffer *ucb,
                                struct trace_event_file *trace_file)
{
        struct uprobe_trace_entry_head *entry;
        struct trace_event_buffer fbuffer;
        void *data;
        int size, esize;
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
        size = esize + ucb->dsize;
        entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
        if (!entry)
                return;

        if (is_ret_probe(tu)) {
                entry->vaddr[0] = func;
                entry->vaddr[1] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                entry->vaddr[0] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        memcpy(data, ucb->buf, ucb->dsize);

        trace_event_buffer_commit(&fbuffer);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
                             struct uprobe_cpu_buffer **ucbp)
{
        struct event_file_link *link;
        struct uprobe_cpu_buffer *ucb;

        if (is_ret_probe(tu))
                return 0;

        ucb = prepare_uprobe_buffer(tu, regs, ucbp);

        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
                __uprobe_trace_func(tu, 0, regs, ucb, link->file);
        rcu_read_unlock();

        return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
                                 struct pt_regs *regs,
                                 struct uprobe_cpu_buffer **ucbp)
{
        struct event_file_link *link;
        struct uprobe_cpu_buffer *ucb;

        ucb = prepare_uprobe_buffer(tu, regs, ucbp);

        rcu_read_lock();
        trace_probe_for_each_link_rcu(link, &tu->tp)
                __uprobe_trace_func(tu, func, regs, ucb, link->file);
        rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
        struct uprobe_trace_entry_head *entry;
        struct trace_seq *s = &iter->seq;
        struct trace_uprobe *tu;
        u8 *data;

        entry = (struct uprobe_trace_entry_head *)iter->ent;
        tu = trace_uprobe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (unlikely(!tu))
                goto out;

        if (is_ret_probe(tu)) {
                trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
                                 trace_probe_name(&tu->tp),
                                 entry->vaddr[1], entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                trace_seq_printf(s, "%s: (0x%lx)",
                                 trace_probe_name(&tu->tp),
                                 entry->vaddr[0]);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);

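/* Attach the consumer to the target inode/offset and stash the handle. */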
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
        struct inode *inode = d_real_inode(tu->path.dentry);
        struct uprobe *uprobe;

        tu->consumer.filter = filter;
        uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
        if (IS_ERR(uprobe))
                return PTR_ERR(uprobe);

        tu->uprobe = uprobe;
        return 0;
}

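/*
 * Unregister the uprobes of all sibling probes of this event, with a
 * single synchronization at the end.
 */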
static void __probe_event_disable(struct trace_probe *tp)
{
        struct trace_uprobe *tu;
        bool sync = false;

        tu = container_of(tp, struct trace_uprobe, tp);
        WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

        list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
                if (!tu->uprobe)
                        continue;

                uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
                sync = true;
                tu->uprobe = NULL;
        }
        if (sync)
                uprobe_unregister_sync();
}

static int probe_event_enable(struct trace_event_call *call,
                        struct trace_event_file *file, filter_func_t filter)
{
        struct trace_probe *tp;
        struct trace_uprobe *tu;
        bool enabled;
        int ret;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
        enabled = trace_probe_is_enabled(tp);

        /* This may also change "enabled" state */
        if (file) {
                if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
                        return -EINTR;

                ret = trace_probe_add_file(tp, file);
                if (ret < 0)
                        return ret;
        } else {
                if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
                        return -EINTR;

                trace_probe_set_flag(tp, TP_FLAG_PROFILE);
        }

        tu = container_of(tp, struct trace_uprobe, tp);
        WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

        if (enabled)
                return 0;

        ret = uprobe_buffer_enable();
        if (ret)
                goto err_flags;

        list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
                ret = trace_uprobe_enable(tu, filter);
                if (ret) {
                        __probe_event_disable(tp);
                        goto err_buffer;
                }
        }

        return 0;

 err_buffer:
        uprobe_buffer_disable();

 err_flags:
        if (file)
                trace_probe_remove_file(tp, file);
        else
                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

        return ret;
}

static void probe_event_disable(struct trace_event_call *call,
                                struct trace_event_file *file)
{
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return;

        if (!trace_probe_is_enabled(tp))
                return;

        if (file) {
                if (trace_probe_remove_file(tp, file) < 0)
                        return;

                if (trace_probe_is_enabled(tp))
                        return;
        } else
                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

        __probe_event_disable(tp);
        uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
        int ret, size;
        struct uprobe_trace_entry_head field;
        struct trace_uprobe *tu;

        tu = trace_uprobe_primary_from_call(event_call);
        if (unlikely(!tu))
                return -ENODEV;

        if (is_ret_probe(tu)) {
                DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
                DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
                size = SIZEOF_TRACE_ENTRY(true);
        } else {
                DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
                size = SIZEOF_TRACE_ENTRY(false);
        }

        return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
        struct perf_event *event;

        list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
                if (event->hw.target->mm == mm)
                        return true;
        }

        return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
                          struct perf_event *event)
{
        return __uprobe_perf_filter(filter, event->hw.target->mm);
}

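/* This returns true if the probe must stay applied for remaining consumers */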
static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
                                       struct perf_event *event)
{
        bool done;

        write_lock(&filter->rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
                done = filter->nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
                        trace_uprobe_filter_event(filter, event);
        } else {
                filter->nr_systemwide--;
                done = filter->nr_systemwide;
        }
        write_unlock(&filter->rwlock);

        return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
                                    struct perf_event *event)
{
        bool done;

        write_lock(&filter->rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
                 * uprobe_apply(). current->mm must be probed and we can rely
                 * on dup_mmap() which preserves the already installed bp's.
                 *
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
                done = filter->nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
                        trace_uprobe_filter_event(filter, event);
                list_add(&event->hw.tp_list, &filter->perf_events);
        } else {
                done = filter->nr_systemwide;
                filter->nr_systemwide++;
        }
        write_unlock(&filter->rwlock);

        return done;
}

static int uprobe_perf_close(struct trace_event_call *call,
                             struct perf_event *event)
{
        struct trace_probe *tp;
        struct trace_uprobe *tu;
        int ret = 0;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;

        tu = container_of(tp, struct trace_uprobe, tp);
        if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
                return 0;

        list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
                ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
                if (ret)
                        break;
        }

        return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
                            struct perf_event *event)
{
        struct trace_probe *tp;
        struct trace_uprobe *tu;
        int err = 0;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;

        tu = container_of(tp, struct trace_uprobe, tp);
        if (trace_uprobe_filter_add(tu->tp.event->filter, event))
                return 0;

        list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
                err = uprobe_apply(tu->uprobe, &tu->consumer, true);
                if (err) {
                        uprobe_perf_close(call, event);
                        break;
                }
        }

        return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
        struct trace_uprobe_filter *filter;
        struct trace_uprobe *tu;
        int ret;

        tu = container_of(uc, struct trace_uprobe, consumer);
        filter = tu->tp.event->filter;

        /*
         * speculative short-circuiting check to avoid unnecessarily taking
         * filter->rwlock below, if the uprobe has a system-wide consumer
         */
        if (READ_ONCE(filter->nr_systemwide))
                return true;

        read_lock(&filter->rwlock);
        ret = __uprobe_perf_filter(filter, mm);
        read_unlock(&filter->rwlock);

        return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
                               unsigned long func, struct pt_regs *regs,
                               struct uprobe_cpu_buffer **ucbp)
{
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
        struct uprobe_trace_entry_head *entry;
        struct uprobe_cpu_buffer *ucb;
        struct hlist_head *head;
        void *data;
        int size, esize;
        int rctx;

#ifdef CONFIG_BPF_EVENTS
        if (bpf_prog_array_valid(call)) {
                const struct bpf_prog_array *array;
                u32 ret;

                rcu_read_lock_trace();
                array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
                ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
                rcu_read_unlock_trace();
                if (!ret)
                        return;
        }
#endif /* CONFIG_BPF_EVENTS */

        esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

        ucb = prepare_uprobe_buffer(tu, regs, ucbp);
        size = esize + ucb->dsize;
        size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
                return;

        preempt_disable();
        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                goto out;

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                goto out;

        if (is_ret_probe(tu)) {
                entry->vaddr[0] = func;
                entry->vaddr[1] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
                entry->vaddr[0] = instruction_pointer(regs);
                data = DATAOF_TRACE_ENTRY(entry, false);
        }

        memcpy(data, ucb->buf, ucb->dsize);

        if (size - esize > ucb->dsize)
                memset(data + ucb->dsize, 0, size - esize - ucb->dsize);

        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
 out:
        preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
                            struct uprobe_cpu_buffer **ucbp)
{
        if (!uprobe_perf_filter(&tu->consumer, current->mm))
                return UPROBE_HANDLER_REMOVE;

        if (!is_ret_probe(tu))
                __uprobe_perf_func(tu, 0, regs, ucbp);
        return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
                                struct pt_regs *regs,
                                struct uprobe_cpu_buffer **ucbp)
{
        __uprobe_perf_func(tu, func, regs, ucbp);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
                        const char **filename, u64 *probe_offset,
                        u64 *probe_addr, bool perf_type_tracepoint)
{
        const char *pevent = trace_event_name(event->tp_event);
        const char *group = event->tp_event->class->system;
        struct trace_uprobe *tu;

        if (perf_type_tracepoint)
                tu = find_probe_event(pevent, group);
        else
                tu = trace_uprobe_primary_from_call(event->tp_event);
        if (!tu)
                return -EINVAL;

        *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
                                    : BPF_FD_TYPE_UPROBE;
        *filename = tu->filename;
        *probe_offset = tu->offset;
        *probe_addr = tu->ref_ctr_offset;
        return 0;
}
#endif  /* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                      void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return probe_event_enable(event, file, NULL);

        case TRACE_REG_UNREGISTER:
                probe_event_disable(event, file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return probe_event_enable(event, NULL, uprobe_perf_filter);

        case TRACE_REG_PERF_UNREGISTER:
                probe_event_disable(event, NULL);
                return 0;

        case TRACE_REG_PERF_OPEN:
                return uprobe_perf_open(event, data);

        case TRACE_REG_PERF_CLOSE:
                return uprobe_perf_close(event, data);

#endif
        default:
                return 0;
        }
}

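/*
 * Breakpoint hit handler: count the hit, record dispatch data for
 * FETCH_OP_FOFFS address translation, then run the trace and/or perf
 * handlers depending on which flags are set.
 */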
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
                             __u64 *data)
{
        struct trace_uprobe *tu;
        struct uprobe_dispatch_data udd;
        struct uprobe_cpu_buffer *ucb = NULL;
        int ret = 0;

        tu = container_of(con, struct trace_uprobe, consumer);

        this_cpu_inc(*tu->nhits);

        udd.tu = tu;
        udd.bp_addr = instruction_pointer(regs);

        current->utask->vaddr = (unsigned long) &udd;

        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;

        if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
                ret |= uprobe_trace_func(tu, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
                ret |= uprobe_perf_func(tu, regs, &ucb);
#endif
        uprobe_buffer_put(ucb);
        return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
                                unsigned long func, struct pt_regs *regs,
                                __u64 *data)
{
        struct trace_uprobe *tu;
        struct uprobe_dispatch_data udd;
        struct uprobe_cpu_buffer *ucb = NULL;

        tu = container_of(con, struct trace_uprobe, consumer);

        udd.tu = tu;
        udd.bp_addr = func;

        current->utask->vaddr = (unsigned long) &udd;

        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;

        if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
                uretprobe_trace_func(tu, func, regs, &ucb);

#ifdef CONFIG_PERF_EVENTS
        if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
                uretprobe_perf_func(tu, func, regs, &ucb);
#endif
        uprobe_buffer_put(ucb);
        return 0;
}

static struct trace_event_functions uprobe_funcs = {
        .trace          = print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = uprobe_event_define_fields },
        {}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
        struct trace_event_call *call = trace_probe_event_call(&tu->tp);
        call->event.funcs = &uprobe_funcs;
        call->class->fields_array = uprobe_fields_array;

        call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
        call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
        init_trace_event_call(tu);

        return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
        return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
                          unsigned long ref_ctr_offset, bool is_return)
{
        enum probe_print_type ptype;
        struct trace_uprobe *tu;
        struct path path;
        int ret;

        ret = kern_path(name, LOOKUP_FOLLOW, &path);
        if (ret)
                return ERR_PTR(ret);

        if (!d_is_reg(path.dentry)) {
                path_put(&path);
                return ERR_PTR(-EINVAL);
        }

        /*
         * local trace_uprobes are not added to dyn_event, so they are never
         * searched in find_probe_event(). Therefore, there is no concern of
         * a duplicated name "DUMMY_EVENT" here.
         */
        tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
                                is_return);

        if (IS_ERR(tu)) {
                pr_info("Failed to allocate trace_uprobe.(%d)\n",
                        (int)PTR_ERR(tu));
                path_put(&path);
                return ERR_CAST(tu);
        }

        tu->offset = offs;
        tu->path = path;
        tu->ref_ctr_offset = ref_ctr_offset;
        tu->filename = kstrdup(name, GFP_KERNEL);
        if (!tu->filename) {
                ret = -ENOMEM;
                goto error;
        }

        init_trace_event_call(tu);

        ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
        if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        return trace_probe_event_call(&tu->tp);
error:
        free_trace_uprobe(tu);
        return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
        struct trace_uprobe *tu;

        tu = trace_uprobe_primary_from_call(event_call);

        free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
        int ret;

        ret = dyn_event_register(&trace_uprobe_ops);
        if (ret)
                return ret;

        ret = tracing_init_dentry();
        if (ret)
                return 0;

        trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
                                    NULL, &uprobe_events_ops);
        /* Profile interface */
        trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
                                    NULL, &uprobe_profile_ops);
        return 0;
}

fs_initcall(init_uprobe_trace);