1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  */
8 #define pr_fmt(fmt) "trace_kprobe: " fmt
10 #include <linux/bpf-cgroup.h>
11 #include <linux/cleanup.h>
12 #include <linux/security.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/rculist.h>
16 #include <linux/error-injection.h>
18 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
20 #include "trace_dynevent.h"
21 #include "trace_kprobe_selftest.h"
22 #include "trace_probe.h"
23 #include "trace_probe_tmpl.h"
24 #include "trace_probe_kernel.h"
26 #define KPROBE_EVENT_SYSTEM "kprobes"
27 #define KRETPROBE_MAXACTIVE_MAX 4096
29 /* Kprobe early definition from command line */
30 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
32 static int __init set_kprobe_boot_events(char *str)
34 strscpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
35 disable_tracing_selftest("running kprobe events");
39 __setup("kprobe_event=", set_kprobe_boot_events);
41 static int trace_kprobe_create(const char *raw_command);
42 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
43 static int trace_kprobe_release(struct dyn_event *ev);
44 static bool trace_kprobe_is_busy(struct dyn_event *ev);
45 static bool trace_kprobe_match(const char *system, const char *event,
46 int argc, const char **argv, struct dyn_event *ev);
48 static struct dyn_event_operations trace_kprobe_ops = {
49 .create = trace_kprobe_create,
50 .show = trace_kprobe_show,
51 .is_busy = trace_kprobe_is_busy,
52 .free = trace_kprobe_release,
53 .match = trace_kprobe_match,
56 /*
57  * Kprobe event core functions
58  */
59 struct trace_kprobe {
60 struct dyn_event devent;
61 struct kretprobe rp; /* Use rp.kp for kprobe use */
62 unsigned long __percpu *nhit;
63 const char *symbol; /* symbol name */
64 struct trace_probe tp;
65 };
67 static bool is_trace_kprobe(struct dyn_event *ev)
69 return ev->ops == &trace_kprobe_ops;
72 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
74 return container_of(ev, struct trace_kprobe, devent);
78 * for_each_trace_kprobe - iterate over the trace_kprobe list
79 * @pos: the struct trace_kprobe * for each entry
80 * @dpos: the struct dyn_event * to use as a loop cursor
82 #define for_each_trace_kprobe(pos, dpos) \
83 for_each_dyn_event(dpos) \
84 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
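/*
 * Minimal usage sketch (mirrors find_trace_kprobe() further below); the
 * walk is assumed to run under event_mutex like other dyn_event iterations:
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		pr_debug("%s\n", trace_probe_name(&tk->tp));
 */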
86 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
88 return tk->rp.handler != NULL;
91 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
93 return tk->symbol ? tk->symbol : "unknown";
96 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
98 return tk->rp.kp.offset;
101 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
103 return kprobe_gone(&tk->rp.kp);
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
109 int len = strlen(module_name(mod));
110 const char *name = trace_kprobe_symbol(tk);
112 return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
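/*
 * E.g. a probe whose symbol was recorded as "btrfs:btrfs_sync_file"
 * (illustrative) matches the module named "btrfs" here.
 */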
115 #ifdef CONFIG_MODULES
116 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
123 p = strchr(tk->symbol, ':');
128 ret = !!find_module(tk->symbol);
134 static inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
140 static bool trace_kprobe_is_busy(struct dyn_event *ev)
142 struct trace_kprobe *tk = to_trace_kprobe(ev);
144 return trace_probe_is_enabled(&tk->tp);
147 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
148 int argc, const char **argv)
150 char buf[MAX_ARGSTR_LEN + 1];
156 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
157 else if (tk->rp.kp.offset)
158 snprintf(buf, sizeof(buf), "%s+%u",
159 trace_kprobe_symbol(tk), tk->rp.kp.offset);
161 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
162 if (strcmp(buf, argv[0]))
166 return trace_probe_match_command_args(&tk->tp, argc, argv);
169 static bool trace_kprobe_match(const char *system, const char *event,
170 int argc, const char **argv, struct dyn_event *ev)
172 struct trace_kprobe *tk = to_trace_kprobe(ev);
174 return (event[0] == '\0' ||
175 strcmp(trace_probe_name(&tk->tp), event) == 0) &&
176 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
177 trace_kprobe_match_command_head(tk, argc, argv);
180 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
182 unsigned long nhit = 0;
185 for_each_possible_cpu(cpu)
186 nhit += *per_cpu_ptr(tk->nhit, cpu);
191 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
193 return !(list_empty(&tk->rp.kp.list) &&
194 hlist_unhashed(&tk->rp.kp.hlist));
197 /* Return 0 if it fails to find the symbol address */
198 static nokprobe_inline
199 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
204 addr = (unsigned long)
205 kallsyms_lookup_name(trace_kprobe_symbol(tk));
207 addr += tk->rp.kp.offset;
209 addr = (unsigned long)tk->rp.kp.addr;
214 static nokprobe_inline struct trace_kprobe *
215 trace_kprobe_primary_from_call(struct trace_event_call *call)
217 struct trace_probe *tp;
219 tp = trace_probe_primary_from_call(call);
220 if (WARN_ON_ONCE(!tp))
223 return container_of(tp, struct trace_kprobe, tp);
226 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
228 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
230 return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
231 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
232 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
235 bool trace_kprobe_error_injectable(struct trace_event_call *call)
237 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
239 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
240 false;
243 static int register_kprobe_event(struct trace_kprobe *tk);
244 static int unregister_kprobe_event(struct trace_kprobe *tk);
246 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
247 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
248 struct pt_regs *regs);
250 static void free_trace_kprobe(struct trace_kprobe *tk)
253 trace_probe_cleanup(&tk->tp);
255 free_percpu(tk->nhit);
260 DEFINE_FREE(free_trace_kprobe, struct trace_kprobe *,
261 if (!IS_ERR_OR_NULL(_T)) free_trace_kprobe(_T))
264 * Allocate new trace_probe and initialize it (including kprobes).
266 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
272 int nargs, bool is_return)
274 struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
277 tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
281 tk->nhit = alloc_percpu(unsigned long);
286 tk->symbol = kstrdup(symbol, GFP_KERNEL);
289 tk->rp.kp.symbol_name = tk->symbol;
290 tk->rp.kp.offset = offs;
292 tk->rp.kp.addr = addr;
295 tk->rp.handler = kretprobe_dispatcher;
297 tk->rp.kp.pre_handler = kprobe_dispatcher;
299 tk->rp.maxactive = maxactive;
300 INIT_HLIST_NODE(&tk->rp.kp.hlist);
301 INIT_LIST_HEAD(&tk->rp.kp.list);
303 ret = trace_probe_init(&tk->tp, event, group, false, nargs);
307 dyn_event_init(&tk->devent, &trace_kprobe_ops);
311 static struct trace_kprobe *find_trace_kprobe(const char *event,
314 struct dyn_event *pos;
315 struct trace_kprobe *tk;
317 for_each_trace_kprobe(tk, pos)
318 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
319 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
324 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
328 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
329 if (trace_kprobe_is_return(tk))
330 ret = enable_kretprobe(&tk->rp);
332 ret = enable_kprobe(&tk->rp.kp);
338 static void __disable_trace_kprobe(struct trace_probe *tp)
340 struct trace_kprobe *tk;
342 list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
343 if (!trace_kprobe_is_registered(tk))
345 if (trace_kprobe_is_return(tk))
346 disable_kretprobe(&tk->rp);
348 disable_kprobe(&tk->rp.kp);
353 * Enable trace_probe.
354 * If @file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
356 static int enable_trace_kprobe(struct trace_event_call *call,
357 struct trace_event_file *file)
359 struct trace_probe *tp;
360 struct trace_kprobe *tk;
364 tp = trace_probe_primary_from_call(call);
365 if (WARN_ON_ONCE(!tp))
367 enabled = trace_probe_is_enabled(tp);
369 /* This also changes "enabled" state */
371 ret = trace_probe_add_file(tp, file);
375 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
380 list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
381 if (trace_kprobe_has_gone(tk))
383 ret = __enable_trace_kprobe(tk);
390 /* Failed to enable one of them. Roll back all */
392 __disable_trace_kprobe(tp);
394 trace_probe_remove_file(tp, file);
396 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
403 * Disable trace_probe.
404 * If @file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
406 static int disable_trace_kprobe(struct trace_event_call *call,
407 struct trace_event_file *file)
409 struct trace_probe *tp;
411 tp = trace_probe_primary_from_call(call);
412 if (WARN_ON_ONCE(!tp))
416 if (!trace_probe_get_file_link(tp, file))
418 if (!trace_probe_has_single_file(tp))
420 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
422 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
424 if (!trace_probe_is_enabled(tp))
425 __disable_trace_kprobe(tp);
430 * Synchronization is handled by the callers below. For perf events,
431 * file == NULL and perf_trace_event_unreg() calls
432 * tracepoint_synchronize_unregister() to synchronize the event;
433 * we don't need to handle it here.
435 trace_probe_remove_file(tp, file);
440 #if defined(CONFIG_DYNAMIC_FTRACE) && \
441 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
442 static bool __within_notrace_func(unsigned long addr)
444 unsigned long offset, size;
446 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
449 /* Get the entry address of the target function */
453 * Since ftrace_location_range() does inclusive range check, we need
454 * to subtract 1 byte from the end address.
456 return !ftrace_location_range(addr, addr + size - 1);
459 static bool within_notrace_func(struct trace_kprobe *tk)
461 unsigned long addr = trace_kprobe_address(tk);
462 char symname[KSYM_NAME_LEN], *p;
464 if (!__within_notrace_func(addr))
467 /* Check if the address is on a suffixed-symbol */
468 if (!lookup_symbol_name(addr, symname)) {
469 p = strchr(symname, '.');
473 addr = (unsigned long)kprobe_lookup_name(symname, 0);
475 return __within_notrace_func(addr);
481 #define within_notrace_func(tk) (false)
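/*
 * Example of the suffix handling above (symbol names are illustrative):
 * if the probed address resolves to "vfs_read.isra.0", the name is cut at
 * the '.' and the unsuffixed "vfs_read" address is re-checked against the
 * recorded ftrace locations.
 */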
484 /* Internal register function - just handle k*probes and flags */
485 static int __register_trace_kprobe(struct trace_kprobe *tk)
489 ret = security_locked_down(LOCKDOWN_KPROBES);
493 if (trace_kprobe_is_registered(tk))
496 if (within_notrace_func(tk)) {
497 pr_warn("Could not probe notrace function %ps\n",
498 (void *)trace_kprobe_address(tk));
502 for (i = 0; i < tk->tp.nr_args; i++) {
503 ret = traceprobe_update_arg(&tk->tp.args[i]);
508 /* Set/clear disabled flag according to tp->flag */
509 if (trace_probe_is_enabled(&tk->tp))
510 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
512 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
514 if (trace_kprobe_is_return(tk))
515 ret = register_kretprobe(&tk->rp);
517 ret = register_kprobe(&tk->rp.kp);
522 /* Internal unregister function - just handle k*probes and flags */
523 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
525 if (trace_kprobe_is_registered(tk)) {
526 if (trace_kprobe_is_return(tk))
527 unregister_kretprobe(&tk->rp);
529 unregister_kprobe(&tk->rp.kp);
530 /* Cleanup kprobe for reuse and mark it unregistered */
531 INIT_HLIST_NODE(&tk->rp.kp.hlist);
532 INIT_LIST_HEAD(&tk->rp.kp.list);
533 if (tk->rp.kp.symbol_name)
534 tk->rp.kp.addr = NULL;
538 /* Unregister a trace_probe and probe_event */
539 static int unregister_trace_kprobe(struct trace_kprobe *tk)
541 /* If other probes are on the event, just unregister kprobe */
542 if (trace_probe_has_sibling(&tk->tp))
545 /* Enabled event can not be unregistered */
546 if (trace_probe_is_enabled(&tk->tp))
549 /* If there's a reference to the dynamic event */
550 if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
553 /* Will fail if probe is being used by ftrace or perf */
554 if (unregister_kprobe_event(tk))
558 __unregister_trace_kprobe(tk);
559 dyn_event_remove(&tk->devent);
560 trace_probe_unlink(&tk->tp);
565 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
566 struct trace_kprobe *comp)
568 struct trace_probe_event *tpe = orig->tp.event;
571 list_for_each_entry(orig, &tpe->probes, tp.list) {
572 if (strcmp(trace_kprobe_symbol(orig),
573 trace_kprobe_symbol(comp)) ||
574 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
578 * trace_probe_compare_arg_type() ensured that nr_args and
579 * each argument name and type are same. Let's compare comm.
581 for (i = 0; i < orig->tp.nr_args; i++) {
582 if (strcmp(orig->tp.args[i].comm,
583 comp->tp.args[i].comm))
587 if (i == orig->tp.nr_args)
594 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
598 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
600 /* Note that the argument index starts at 2 */
601 trace_probe_log_set_index(ret + 1);
602 trace_probe_log_err(0, DIFF_ARG_TYPE);
605 if (trace_kprobe_has_same_kprobe(to, tk)) {
606 trace_probe_log_set_index(0);
607 trace_probe_log_err(0, SAME_PROBE);
611 /* Append to existing event */
612 ret = trace_probe_append(&tk->tp, &to->tp);
616 /* Register k*probe */
617 ret = __register_trace_kprobe(tk);
618 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
619 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
624 trace_probe_unlink(&tk->tp);
626 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
631 /* Register a trace_probe and probe_event */
632 static int register_trace_kprobe(struct trace_kprobe *tk)
634 struct trace_kprobe *old_tk;
637 guard(mutex)(&event_mutex);
639 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
640 trace_probe_group_name(&tk->tp));
642 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
643 trace_probe_log_set_index(0);
644 trace_probe_log_err(0, DIFF_PROBE_TYPE);
647 return append_trace_kprobe(tk, old_tk);
650 /* Register new event */
651 ret = register_kprobe_event(tk);
653 if (ret == -EEXIST) {
654 trace_probe_log_set_index(0);
655 trace_probe_log_err(0, EVENT_EXIST);
657 pr_warn("Failed to register probe event(%d)\n", ret);
661 /* Register k*probe */
662 ret = __register_trace_kprobe(tk);
663 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
664 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
669 unregister_kprobe_event(tk);
671 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
676 #ifdef CONFIG_MODULES
677 static int validate_module_probe_symbol(const char *modname, const char *symbol);
679 static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk)
684 p = strchr(trace_kprobe_symbol(tk), ':');
686 ret = validate_module_probe_symbol(module_name(mod), p + 1);
688 ret = __register_trace_kprobe(tk);
692 /* Module notifier call back, checking event on the module */
693 static int trace_kprobe_module_callback(struct notifier_block *nb,
694 unsigned long val, void *data)
696 struct module *mod = data;
697 struct dyn_event *pos;
698 struct trace_kprobe *tk;
701 if (val != MODULE_STATE_COMING)
704 /* Update probes on coming module */
705 guard(mutex)(&event_mutex);
706 for_each_trace_kprobe(tk, pos) {
707 if (trace_kprobe_within_module(tk, mod)) {
708 /* Don't need to check busy - this should have gone. */
709 __unregister_trace_kprobe(tk);
710 ret = register_module_trace_kprobe(mod, tk);
712 pr_warn("Failed to re-register probe %s on %s: %d\n",
713 trace_probe_name(&tk->tp),
714 module_name(mod), ret);
721 static struct notifier_block trace_kprobe_module_nb = {
722 .notifier_call = trace_kprobe_module_callback,
723 .priority = 2 /* Invoked after kprobe and jump_label module callback */
725 static int trace_kprobe_register_module_notifier(void)
727 return register_module_notifier(&trace_kprobe_module_nb);
730 static int trace_kprobe_register_module_notifier(void)
734 #endif /* CONFIG_MODULES */
736 static int count_symbols(void *data, unsigned long unused)
738 unsigned int *count = data;
745 struct sym_count_ctx {
750 static int count_mod_symbols(void *data, const char *name, unsigned long unused)
752 struct sym_count_ctx *ctx = data;
754 if (strcmp(name, ctx->name) == 0)
760 static unsigned int number_of_same_symbols(const char *mod, const char *func_name)
762 struct sym_count_ctx ctx = { .count = 0, .name = func_name };
765 kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
767 module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx);
772 static int validate_module_probe_symbol(const char *modname, const char *symbol)
774 unsigned int count = number_of_same_symbols(modname, symbol);
778 * Users should use ADDR instead to remove the ambiguity
779 * between identically named symbols.
780 */
781 return -EADDRNOTAVAIL;
782 } else if (count == 0) {
784 * We can return -ENOENT here, earlier than when the kprobe
785 * registration itself would fail.
786 */
792 #ifdef CONFIG_MODULES
793 /* Return NULL if the module is not loaded or is being unloaded. */
794 static struct module *try_module_get_by_name(const char *name)
799 mod = find_module(name);
800 if (mod && !try_module_get(mod))
805 #define try_module_get_by_name(name) (NULL)
808 static int validate_probe_symbol(char *symbol)
810 struct module *mod = NULL;
811 char *modname = NULL, *p;
814 p = strchr(symbol, ':');
819 mod = try_module_get_by_name(modname);
824 ret = validate_module_probe_symbol(modname, symbol);
833 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
834 struct pt_regs *regs);
836 static int trace_kprobe_create_internal(int argc, const char *argv[],
837 struct traceprobe_parse_context *ctx)
838 {
839 /*
840  * Argument syntax:
841  *  - Add kprobe:
842  *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
843  *  - Add kretprobe:
844  *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
845  *    Or
846  *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
847  *
848  * Fetch args:
849  *  $retval : fetch return value
850  *  $stack : fetch stack address
851  *  $stackN : fetch Nth of stack (N:0-)
852  *  $comm : fetch current task comm
853  *  @ADDR : fetch memory at ADDR (ADDR should be in kernel)
854  *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
855  *  %REG : fetch register REG
856  * Dereferencing memory fetch:
857  *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
858  * Alias name of args:
859  *  NAME=FETCHARG : set NAME as alias of FETCHARG.
860  * Type of args:
861  *  FETCHARG:TYPE : use TYPE instead of unsigned long.
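 *
 * For illustration only (event names and probed symbol are hypothetical):
 *   p:myprobe vfs_read count=$arg3
 *   r100:myretprobe vfs_read $retval
 */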
863 struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
864 int i, len, new_argc = 0, ret = 0;
865 bool is_return = false;
866 char *symbol __free(kfree) = NULL;
868 const char **new_argv __free(kfree) = NULL;
869 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
870 enum probe_print_type ptype;
874 char buf[MAX_EVENT_NAME_LEN];
875 char gbuf[MAX_EVENT_NAME_LEN];
876 char abuf[MAX_BTF_ARGS_LEN];
877 char *dbuf __free(kfree) = NULL;
879 switch (argv[0][0]) {
891 event = strchr(&argv[0][1], ':');
895 if (isdigit(argv[0][1])) {
897 trace_probe_log_err(1, BAD_MAXACT_TYPE);
901 len = event - &argv[0][1] - 1;
903 len = strlen(&argv[0][1]);
904 if (len > MAX_EVENT_NAME_LEN - 1) {
905 trace_probe_log_err(1, BAD_MAXACT);
908 memcpy(buf, &argv[0][1], len);
910 ret = kstrtouint(buf, 0, &maxactive);
911 if (ret || !maxactive) {
912 trace_probe_log_err(1, BAD_MAXACT);
915 /* kretprobe instances are iterated over via a list. The
916 * maximum should stay reasonable.
917 */
918 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
919 trace_probe_log_err(1, MAXACT_TOO_BIG);
924 /* Try to parse an address. If that fails, try to read the
925 * input as a symbol. */
926 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
927 trace_probe_log_set_index(1);
928 /* Check whether uprobe event specified */
929 if (strchr(argv[1], '/') && strchr(argv[1], ':'))
932 /* a symbol specified */
933 symbol = kstrdup(argv[1], GFP_KERNEL);
937 tmp = strchr(symbol, '%');
939 if (!strcmp(tmp, "%return")) {
943 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
948 /* TODO: support .init module functions */
949 ret = traceprobe_split_symbol_offset(symbol, &offset);
950 if (ret || offset < 0 || offset > UINT_MAX) {
951 trace_probe_log_err(0, BAD_PROBE_ADDR);
954 ret = validate_probe_symbol(symbol);
956 if (ret == -EADDRNOTAVAIL)
957 trace_probe_log_err(0, NON_UNIQ_SYMBOL);
959 trace_probe_log_err(0, BAD_PROBE_ADDR);
963 ctx->flags |= TPARG_FL_RETURN;
964 ret = kprobe_on_func_entry(NULL, symbol, offset);
965 if (ret == 0 && !is_return)
966 ctx->flags |= TPARG_FL_FENTRY;
967 /* Defer the ENOENT case until the kprobe is registered */
968 if (ret == -EINVAL && is_return) {
969 trace_probe_log_err(0, BAD_RETPROBE);
974 trace_probe_log_set_index(0);
976 ret = traceprobe_parse_event_name(&event, &group, gbuf,
983 /* Make a new event name */
985 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
986 is_return ? 'r' : 'p', symbol, offset);
988 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
989 is_return ? 'r' : 'p', addr);
990 sanitize_event_name(buf);
994 argc -= 2; argv += 2;
995 ctx->funcname = symbol;
996 new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
997 abuf, MAX_BTF_ARGS_LEN, ctx);
998 if (IS_ERR(new_argv)) {
999 ret = PTR_ERR(new_argv);
1007 if (argc > MAX_TRACE_ARGS) {
1008 trace_probe_log_set_index(2);
1009 trace_probe_log_err(0, TOO_MANY_ARGS);
1013 ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
1018 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
1022 /* This must return -ENOMEM, else there is a bug */
1023 WARN_ON_ONCE(ret != -ENOMEM);
1024 return ret; /* We know tk is not allocated */
1027 /* parse arguments */
1028 for (i = 0; i < argc; i++) {
1029 trace_probe_log_set_index(i + 2);
1031 ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], ctx);
1033 return ret; /* This can be -ENOMEM */
1035 /* entry handler for kretprobe */
1036 if (is_return && tk->tp.entry_arg) {
1037 tk->rp.entry_handler = trace_kprobe_entry_handler;
1038 tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
1041 ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1042 ret = traceprobe_set_print_fmt(&tk->tp, ptype);
1046 ret = register_trace_kprobe(tk);
1048 trace_probe_log_set_index(1);
1050 trace_probe_log_err(0, BAD_INSN_BNDRY);
1051 else if (ret == -ENOENT)
1052 trace_probe_log_err(0, BAD_PROBE_ADDR);
1053 else if (ret != -ENOMEM && ret != -EEXIST)
1054 trace_probe_log_err(0, FAIL_REG_PROBE);
1058 * Here, 'tk' has been registered to the list successfully,
1059 * so we don't need to free it.
1066 static int trace_kprobe_create_cb(int argc, const char *argv[])
1068 struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
1071 trace_probe_log_init("trace_kprobe", argc, argv);
1073 ret = trace_kprobe_create_internal(argc, argv, &ctx);
1075 traceprobe_finish_parse(&ctx);
1076 trace_probe_log_clear();
1080 static int trace_kprobe_create(const char *raw_command)
1082 return trace_probe_create(raw_command, trace_kprobe_create_cb);
1085 static int create_or_delete_trace_kprobe(const char *raw_command)
1089 if (raw_command[0] == '-')
1090 return dyn_event_release(raw_command, &trace_kprobe_ops);
1092 ret = dyn_event_create(raw_command, &trace_kprobe_ops);
1093 return ret == -ECANCELED ? -EINVAL : ret;
1096 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
1098 return create_or_delete_trace_kprobe(cmd->seq.buffer);
1102 * kprobe_event_cmd_init - Initialize a kprobe event command object
1103 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1104 * @buf: A pointer to the buffer used to build the command
1105 * @maxlen: The length of the buffer passed in @buf
1107 * Initialize a kprobe event command object. Use this before
1108 * calling any of the other kprobe_event functions.
1110 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1112 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
1113 trace_kprobe_run_command);
1115 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
1118 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
1119 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1120 * @kretprobe: Is this a return probe?
1121 * @name: The name of the kprobe event
1122 * @loc: The location of the kprobe event
1123 * @...: Variable number of arg (pairs), one pair for each field
1125 * NOTE: Users normally won't want to call this function directly, but
1126 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
1127 * adds a NULL to the end of the arg list. If this function is used
1128 * directly, make sure the last arg in the variable arg list is NULL.
1130 * Generate a kprobe event command to be executed by
1131 * kprobe_event_gen_cmd_end(). This function can be used to generate the
1132 * complete command or only the first part of it; in the latter case,
1133 * kprobe_event_add_fields() can be used to add more fields following this.
1135 * Unlike synth_event_gen_cmd_start(), @loc must be specified; this
1136 * function returns -EINVAL if @loc == NULL.
1138 * Return: 0 if successful, error otherwise.
1140 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
1141 const char *name, const char *loc, ...)
1143 char buf[MAX_EVENT_NAME_LEN];
1144 struct dynevent_arg arg;
1148 if (cmd->type != DYNEVENT_TYPE_KPROBE)
1155 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
1157 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
1159 ret = dynevent_str_add(cmd, buf);
1163 dynevent_arg_init(&arg, 0);
1165 ret = dynevent_arg_add(cmd, &arg, NULL);
1169 va_start(args, loc);
1173 field = va_arg(args, const char *);
1177 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1183 ret = dynevent_arg_add(cmd, &arg, NULL);
1191 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
1194 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1195 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1196 * @...: Variable number of arg (pairs), one pair for each field
1198 * NOTE: Users normally won't want to call this function directly, but
1199 * rather use the kprobe_event_add_fields() wrapper, which
1200 * automatically adds a NULL to the end of the arg list. If this
1201 * function is used directly, make sure the last arg in the variable
1202 * arg list is NULL.
1204 * Add probe fields to an existing kprobe command using a variable
1205 * list of args. Fields are added in the same order they're listed.
1207 * Return: 0 if successful, error otherwise.
1209 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1211 struct dynevent_arg arg;
1215 if (cmd->type != DYNEVENT_TYPE_KPROBE)
1218 dynevent_arg_init(&arg, 0);
1220 va_start(args, cmd);
1224 field = va_arg(args, const char *);
1228 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1234 ret = dynevent_arg_add(cmd, &arg, NULL);
1242 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
1245 * kprobe_event_delete - Delete a kprobe event
1246 * @name: The name of the kprobe event to delete
1248 * Delete a kprobe event with the given @name from kernel code rather
1249 * than directly from the command line.
1251 * Return: 0 if successful, error otherwise.
1253 int kprobe_event_delete(const char *name)
1255 char buf[MAX_EVENT_NAME_LEN];
1257 snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1259 return create_or_delete_trace_kprobe(buf);
1261 EXPORT_SYMBOL_GPL(kprobe_event_delete);
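/*
 * A sketch of in-kernel usage of the command API above (probed symbol,
 * event name and argument specs are illustrative; error handling is
 * abbreviated):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "my_open_probe", "do_sys_open",
 *					 "dfd=$arg1", "filename=$arg2");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 *	...
 *	ret = kprobe_event_delete("my_open_probe");
 */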
1263 static int trace_kprobe_release(struct dyn_event *ev)
1265 struct trace_kprobe *tk = to_trace_kprobe(ev);
1266 int ret = unregister_trace_kprobe(tk);
1269 free_trace_kprobe(tk);
1273 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1275 struct trace_kprobe *tk = to_trace_kprobe(ev);
1278 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1279 if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1280 seq_printf(m, "%d", tk->rp.maxactive);
1281 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1282 trace_probe_name(&tk->tp));
1285 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1286 else if (tk->rp.kp.offset)
1287 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1290 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1292 for (i = 0; i < tk->tp.nr_args; i++)
1293 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1299 static int probes_seq_show(struct seq_file *m, void *v)
1301 struct dyn_event *ev = v;
1303 if (!is_trace_kprobe(ev))
1306 return trace_kprobe_show(m, ev);
1309 static const struct seq_operations probes_seq_op = {
1310 .start = dyn_event_seq_start,
1311 .next = dyn_event_seq_next,
1312 .stop = dyn_event_seq_stop,
1313 .show = probes_seq_show
1316 static int probes_open(struct inode *inode, struct file *file)
1320 ret = security_locked_down(LOCKDOWN_TRACEFS);
1324 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1325 ret = dyn_events_release_all(&trace_kprobe_ops);
1330 return seq_open(file, &probes_seq_op);
1333 static ssize_t probes_write(struct file *file, const char __user *buffer,
1334 size_t count, loff_t *ppos)
1336 return trace_parse_run_command(file, buffer, count, ppos,
1337 create_or_delete_trace_kprobe);
1340 static const struct file_operations kprobe_events_ops = {
1341 .owner = THIS_MODULE,
1342 .open = probes_open,
1344 .llseek = seq_lseek,
1345 .release = seq_release,
1346 .write = probes_write,
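/*
 * With tracefs mounted at /sys/kernel/tracing, the file above is driven
 * like this (event name and argument spec are illustrative):
 *
 *   echo 'p:myprobe vfs_read count=$arg3' >> /sys/kernel/tracing/kprobe_events
 *   echo '-:myprobe' >> /sys/kernel/tracing/kprobe_events
 *   echo > /sys/kernel/tracing/kprobe_events    # O_TRUNC removes all unused events
 */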
1349 static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
1351 return trace_kprobe_is_return(tk) ?
1352 tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1355 /* Probes profiling interfaces */
1356 static int probes_profile_seq_show(struct seq_file *m, void *v)
1358 struct dyn_event *ev = v;
1359 struct trace_kprobe *tk;
1360 unsigned long nmissed;
1362 if (!is_trace_kprobe(ev))
1365 tk = to_trace_kprobe(ev);
1366 nmissed = trace_kprobe_missed(tk);
1367 seq_printf(m, " %-44s %15lu %15lu\n",
1368 trace_probe_name(&tk->tp),
1369 trace_kprobe_nhit(tk),
1375 static const struct seq_operations profile_seq_op = {
1376 .start = dyn_event_seq_start,
1377 .next = dyn_event_seq_next,
1378 .stop = dyn_event_seq_stop,
1379 .show = probes_profile_seq_show
1382 static int profile_open(struct inode *inode, struct file *file)
1386 ret = security_locked_down(LOCKDOWN_TRACEFS);
1390 return seq_open(file, &profile_seq_op);
1393 static const struct file_operations kprobe_profile_ops = {
1394 .owner = THIS_MODULE,
1395 .open = profile_open,
1397 .llseek = seq_lseek,
1398 .release = seq_release,
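/*
 * kprobe_profile prints one line per event: name, hit count, missed count
 * (see probes_profile_seq_show() above). Illustrative output:
 *
 *   myprobe                                                  12               0
 */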
1401 /* Note that we don't verify it, since the code does not come from user space */
1403 process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
1404 void *dest, void *base)
1406 struct pt_regs *regs = rec;
1411 /* 1st stage: get value from context */
1414 val = regs_get_register(regs, code->param);
1416 case FETCH_OP_STACK:
1417 val = regs_get_kernel_stack_nth(regs, code->param);
1419 case FETCH_OP_STACKP:
1420 val = kernel_stack_pointer(regs);
1422 case FETCH_OP_RETVAL:
1423 val = regs_return_value(regs);
1425 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1427 val = regs_get_kernel_argument(regs, code->param);
1429 case FETCH_OP_EDATA:
1430 val = *(unsigned long *)((unsigned long)edata + code->offset);
1433 case FETCH_NOP_SYMBOL: /* Ignore a place holder */
1437 ret = process_common_fetch_insn(code, &val);
1443 return process_fetch_insn_bottom(code, val, dest, base);
1445 NOKPROBE_SYMBOL(process_fetch_insn)
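/*
 * For example, an argument spec such as "+8($stack):u32" (illustrative)
 * fetches the stack pointer here (FETCH_OP_STACKP) and leaves the +8
 * dereference and the u32 store to process_fetch_insn_bottom(), per the
 * pipeline in trace_probe_tmpl.h.
 */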
1447 /* Kprobe handler */
1448 static nokprobe_inline void
1449 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1450 struct trace_event_file *trace_file)
1452 struct kprobe_trace_entry_head *entry;
1453 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1454 struct trace_event_buffer fbuffer;
1457 WARN_ON(call != trace_file->event_call);
1459 if (trace_trigger_soft_disabled(trace_file))
1462 dsize = __get_data_size(&tk->tp, regs, NULL);
1464 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1465 sizeof(*entry) + tk->tp.size + dsize);
1469 fbuffer.regs = regs;
1470 entry->ip = (unsigned long)tk->rp.kp.addr;
1471 store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1473 trace_event_buffer_commit(&fbuffer);
1477 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1479 struct event_file_link *link;
1481 trace_probe_for_each_link_rcu(link, &tk->tp)
1482 __kprobe_trace_func(tk, regs, link->file);
1484 NOKPROBE_SYMBOL(kprobe_trace_func);
1486 /* Kretprobe handler */
1488 static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
1489 struct pt_regs *regs)
1491 struct kretprobe *rp = get_kretprobe(ri);
1492 struct trace_kprobe *tk;
1495 * There is a small chance that get_kretprobe(ri) returns NULL when
1496 * the kretprobe is unregistered on another CPU between the kretprobe
1497 * trampoline_handler and this function.
1502 tk = container_of(rp, struct trace_kprobe, rp);
1504 /* store argument values into ri->data as entry data */
1505 if (tk->tp.entry_arg)
1506 store_trace_entry_data(ri->data, &tk->tp, regs);
1512 static nokprobe_inline void
1513 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1514 struct pt_regs *regs,
1515 struct trace_event_file *trace_file)
1517 struct kretprobe_trace_entry_head *entry;
1518 struct trace_event_buffer fbuffer;
1519 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1522 WARN_ON(call != trace_file->event_call);
1524 if (trace_trigger_soft_disabled(trace_file))
1527 dsize = __get_data_size(&tk->tp, regs, ri->data);
1529 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1530 sizeof(*entry) + tk->tp.size + dsize);
1534 fbuffer.regs = regs;
1535 entry->func = (unsigned long)tk->rp.kp.addr;
1536 entry->ret_ip = get_kretprobe_retaddr(ri);
1537 store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1539 trace_event_buffer_commit(&fbuffer);
1543 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1544 struct pt_regs *regs)
1546 struct event_file_link *link;
1548 trace_probe_for_each_link_rcu(link, &tk->tp)
1549 __kretprobe_trace_func(tk, ri, regs, link->file);
1551 NOKPROBE_SYMBOL(kretprobe_trace_func);
1553 /* Event entry printers */
1554 static enum print_line_t
1555 print_kprobe_event(struct trace_iterator *iter, int flags,
1556 struct trace_event *event)
1558 struct kprobe_trace_entry_head *field;
1559 struct trace_seq *s = &iter->seq;
1560 struct trace_probe *tp;
1562 field = (struct kprobe_trace_entry_head *)iter->ent;
1563 tp = trace_probe_primary_from_call(
1564 container_of(event, struct trace_event_call, event));
1565 if (WARN_ON_ONCE(!tp))
1568 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1570 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1573 trace_seq_putc(s, ')');
1575 if (trace_probe_print_args(s, tp->args, tp->nr_args,
1576 (u8 *)&field[1], field) < 0)
1579 trace_seq_putc(s, '\n');
1581 return trace_handle_return(s);
1584 static enum print_line_t
1585 print_kretprobe_event(struct trace_iterator *iter, int flags,
1586 struct trace_event *event)
1588 struct kretprobe_trace_entry_head *field;
1589 struct trace_seq *s = &iter->seq;
1590 struct trace_probe *tp;
1592 field = (struct kretprobe_trace_entry_head *)iter->ent;
1593 tp = trace_probe_primary_from_call(
1594 container_of(event, struct trace_event_call, event));
1595 if (WARN_ON_ONCE(!tp))
1598 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1600 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1603 trace_seq_puts(s, " <- ");
1605 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1608 trace_seq_putc(s, ')');
1610 if (trace_probe_print_args(s, tp->args, tp->nr_args,
1611 (u8 *)&field[1], field) < 0)
1614 trace_seq_putc(s, '\n');
1617 return trace_handle_return(s);
1621 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1624 struct kprobe_trace_entry_head field;
1625 struct trace_probe *tp;
1627 tp = trace_probe_primary_from_call(event_call);
1628 if (WARN_ON_ONCE(!tp))
1631 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1633 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1636 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1639 struct kretprobe_trace_entry_head field;
1640 struct trace_probe *tp;
1642 tp = trace_probe_primary_from_call(event_call);
1643 if (WARN_ON_ONCE(!tp))
1646 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1647 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1649 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1652 #ifdef CONFIG_PERF_EVENTS
1654 /* Kprobe profile handler */
1656 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1658 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1659 struct kprobe_trace_entry_head *entry;
1660 struct hlist_head *head;
1661 int size, __size, dsize;
1664 if (bpf_prog_array_valid(call)) {
1665 unsigned long orig_ip = instruction_pointer(regs);
1668 ret = trace_call_bpf(call, regs);
1671 * We need to check and see if we modified the pc of the
1672 * pt_regs, and if so return 1 so that we don't do the
1673 * single stepping.
1674 */
1675 if (orig_ip != instruction_pointer(regs))
1681 head = this_cpu_ptr(call->perf_events);
1682 if (hlist_empty(head))
1685 dsize = __get_data_size(&tk->tp, regs, NULL);
1686 __size = sizeof(*entry) + tk->tp.size + dsize;
1687 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1688 size -= sizeof(u32);
1690 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1694 entry->ip = (unsigned long)tk->rp.kp.addr;
1695 memset(&entry[1], 0, dsize);
1696 store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
1697 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1701 NOKPROBE_SYMBOL(kprobe_perf_func);
1703 /* Kretprobe profile handler */
1705 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1706 struct pt_regs *regs)
1708 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1709 struct kretprobe_trace_entry_head *entry;
1710 struct hlist_head *head;
1711 int size, __size, dsize;
1714 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1717 head = this_cpu_ptr(call->perf_events);
1718 if (hlist_empty(head))
1721 dsize = __get_data_size(&tk->tp, regs, ri->data);
1722 __size = sizeof(*entry) + tk->tp.size + dsize;
1723 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1724 size -= sizeof(u32);
1726 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1730 entry->func = (unsigned long)tk->rp.kp.addr;
1731 entry->ret_ip = get_kretprobe_retaddr(ri);
1732 store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
1733 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1736 NOKPROBE_SYMBOL(kretprobe_perf_func);
1738 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1739 const char **symbol, u64 *probe_offset,
1740 u64 *probe_addr, unsigned long *missed,
1741 bool perf_type_tracepoint)
1743 const char *pevent = trace_event_name(event->tp_event);
1744 const char *group = event->tp_event->class->system;
1745 struct trace_kprobe *tk;
1747 if (perf_type_tracepoint)
1748 tk = find_trace_kprobe(pevent, group);
1750 tk = trace_kprobe_primary_from_call(event->tp_event);
1754 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1755 : BPF_FD_TYPE_KPROBE;
1756 *probe_offset = tk->rp.kp.offset;
1757 *probe_addr = kallsyms_show_value(current_cred()) ?
1758 (unsigned long)tk->rp.kp.addr : 0;
1759 *symbol = tk->symbol;
1761 *missed = trace_kprobe_missed(tk);
1764 #endif /* CONFIG_PERF_EVENTS */
1767 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1769 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1770 * lockless, but we can't race with this __init function.
1772 static int kprobe_register(struct trace_event_call *event,
1773 enum trace_reg type, void *data)
1775 struct trace_event_file *file = data;
1778 case TRACE_REG_REGISTER:
1779 return enable_trace_kprobe(event, file);
1780 case TRACE_REG_UNREGISTER:
1781 return disable_trace_kprobe(event, file);
1783 #ifdef CONFIG_PERF_EVENTS
1784 case TRACE_REG_PERF_REGISTER:
1785 return enable_trace_kprobe(event, NULL);
1786 case TRACE_REG_PERF_UNREGISTER:
1787 return disable_trace_kprobe(event, NULL);
1788 case TRACE_REG_PERF_OPEN:
1789 case TRACE_REG_PERF_CLOSE:
1790 case TRACE_REG_PERF_ADD:
1791 case TRACE_REG_PERF_DEL:
1798 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1800 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1803 raw_cpu_inc(*tk->nhit);
1805 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1806 kprobe_trace_func(tk, regs);
1807 #ifdef CONFIG_PERF_EVENTS
1808 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1809 ret = kprobe_perf_func(tk, regs);
1813 NOKPROBE_SYMBOL(kprobe_dispatcher);
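/*
 * Note: the non-zero return from kprobe_perf_func() (set when an attached
 * BPF program rewrote regs->ip, see the orig_ip check above) is propagated
 * here so that the kprobe core skips single-stepping the original
 * instruction.
 */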
1816 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1818 struct kretprobe *rp = get_kretprobe(ri);
1819 struct trace_kprobe *tk;
1822 * There is a small chance that get_kretprobe(ri) returns NULL when
1823 * the kretprobe is unregistered on another CPU between the kretprobe
1824 * trampoline_handler and this function.
1829 tk = container_of(rp, struct trace_kprobe, rp);
1830 raw_cpu_inc(*tk->nhit);
1832 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1833 kretprobe_trace_func(tk, ri, regs);
1834 #ifdef CONFIG_PERF_EVENTS
1835 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1836 kretprobe_perf_func(tk, ri, regs);
1838 return 0; /* We don't tweak kernel, so just return 0 */
1840 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1842 static struct trace_event_functions kretprobe_funcs = {
1843 .trace = print_kretprobe_event
1846 static struct trace_event_functions kprobe_funcs = {
1847 .trace = print_kprobe_event
1850 static struct trace_event_fields kretprobe_fields_array[] = {
1851 { .type = TRACE_FUNCTION_TYPE,
1852 .define_fields = kretprobe_event_define_fields },
1856 static struct trace_event_fields kprobe_fields_array[] = {
1857 { .type = TRACE_FUNCTION_TYPE,
1858 .define_fields = kprobe_event_define_fields },
1862 static inline void init_trace_event_call(struct trace_kprobe *tk)
1864 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1866 if (trace_kprobe_is_return(tk)) {
1867 call->event.funcs = &kretprobe_funcs;
1868 call->class->fields_array = kretprobe_fields_array;
1870 call->event.funcs = &kprobe_funcs;
1871 call->class->fields_array = kprobe_fields_array;
1874 call->flags = TRACE_EVENT_FL_KPROBE;
1875 call->class->reg = kprobe_register;
1878 static int register_kprobe_event(struct trace_kprobe *tk)
1880 init_trace_event_call(tk);
1882 return trace_probe_register_event_call(&tk->tp);
1885 static int unregister_kprobe_event(struct trace_kprobe *tk)
1887 return trace_probe_unregister_event_call(&tk->tp);
1890 #ifdef CONFIG_PERF_EVENTS
1892 /* create a trace_kprobe, but don't add it to global lists */
1893 struct trace_event_call *
1894 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1897 enum probe_print_type ptype;
1898 struct trace_kprobe *tk __free(free_trace_kprobe) = NULL;
1903 ret = validate_probe_symbol(func);
1905 return ERR_PTR(ret);
1909 * Local trace_kprobes are not added to dyn_event, so they are never
1910 * searched in find_trace_kprobe(). Therefore, there is no concern
1911 * about duplicate names here.
1913 event = func ? func : "DUMMY_EVENT";
1915 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1916 offs, 0 /* maxactive */, 0 /* nargs */,
1920 pr_info("Failed to allocate trace_probe.(%d)\n",
1922 return ERR_CAST(tk);
1925 init_trace_event_call(tk);
1927 ptype = trace_kprobe_is_return(tk) ?
1928 PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1929 if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0)
1930 return ERR_PTR(-ENOMEM);
1932 ret = __register_trace_kprobe(tk);
1934 return ERR_PTR(ret);
1936 return trace_probe_event_call(&(no_free_ptr(tk)->tp));
1939 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1941 struct trace_kprobe *tk;
1943 tk = trace_kprobe_primary_from_call(event_call);
1947 if (trace_probe_is_enabled(&tk->tp)) {
1952 __unregister_trace_kprobe(tk);
1954 free_trace_kprobe(tk);
1956 #endif /* CONFIG_PERF_EVENTS */
1958 static __init void enable_boot_kprobe_events(void)
1960 struct trace_array *tr = top_trace_array();
1961 struct trace_event_file *file;
1962 struct trace_kprobe *tk;
1963 struct dyn_event *pos;
1965 guard(mutex)(&event_mutex);
1966 for_each_trace_kprobe(tk, pos) {
1967 list_for_each_entry(file, &tr->events, list)
1968 if (file->event_call == trace_probe_event_call(&tk->tp))
1969 trace_event_enable_disable(file, 1, 0);
1973 static __init void setup_boot_kprobe_events(void)
1975 char *p, *cmd = kprobe_boot_events_buf;
1978 strreplace(kprobe_boot_events_buf, ',', ' ');
1980 while (cmd && *cmd != '\0') {
1981 p = strchr(cmd, ';');
1985 ret = create_or_delete_trace_kprobe(cmd);
1987 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1992 enable_boot_kprobe_events();
1996 * Register dynevent at core_initcall. This allows kernel to setup kprobe
1997 * events in postcore_initcall without tracefs.
1999 static __init int init_kprobe_trace_early(void)
2003 ret = dyn_event_register(&trace_kprobe_ops);
2007 if (trace_kprobe_register_module_notifier())
2012 core_initcall(init_kprobe_trace_early);
2014 /* Make a tracefs interface for controlling probe points */
2015 static __init int init_kprobe_trace(void)
2019 ret = tracing_init_dentry();
2023 /* Event list interface */
2024 trace_create_file("kprobe_events", TRACE_MODE_WRITE,
2025 NULL, NULL, &kprobe_events_ops);
2027 /* Profile interface */
2028 trace_create_file("kprobe_profile", TRACE_MODE_READ,
2029 NULL, NULL, &kprobe_profile_ops);
2031 setup_boot_kprobe_events();
2035 fs_initcall(init_kprobe_trace);
2038 #ifdef CONFIG_FTRACE_STARTUP_TEST
2039 static __init struct trace_event_file *
2040 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
2042 struct trace_event_file *file;
2044 list_for_each_entry(file, &tr->events, list)
2045 if (file->event_call == trace_probe_event_call(&tk->tp))
2052 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
2053 * stage, so we can do this locklessly.
2055 static __init int kprobe_trace_self_tests_init(void)
2058 int (*target)(int, int, int, int, int, int);
2059 struct trace_kprobe *tk;
2060 struct trace_event_file *file;
2062 if (tracing_is_disabled())
2065 if (tracing_selftest_disabled)
2068 target = kprobe_trace_selftest_target;
2070 pr_info("Testing kprobe tracing: ");
2072 ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
2073 if (WARN_ONCE(ret, "error on probing function entry.")) {
2076 /* Enable trace point */
2077 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2078 if (WARN_ONCE(tk == NULL, "error on probing function entry.")) {
2081 file = find_trace_probe_file(tk, top_trace_array());
2082 if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2085 enable_trace_kprobe(
2086 trace_probe_event_call(&tk->tp), file);
2090 ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2091 if (WARN_ONCE(ret, "error on probing function return.")) {
2094 /* Enable trace point */
2095 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2096 if (WARN_ONCE(tk == NULL, "error on getting 2nd new probe.")) {
2099 file = find_trace_probe_file(tk, top_trace_array());
2100 if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2103 enable_trace_kprobe(
2104 trace_probe_event_call(&tk->tp), file);
2111 ret = target(1, 2, 3, 4, 5, 6);
2114 * Not expecting an error here, the check is only to prevent the
2115 * optimizer from removing the call to target() as otherwise there
2116 * are no side-effects and the call is never performed.
2121 /* Disable trace points before removing it */
2122 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2123 if (WARN_ONCE(tk == NULL, "error on getting test probe.")) {
2126 if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2127 "incorrect number of testprobe hits."))
2130 file = find_trace_probe_file(tk, top_trace_array());
2131 if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2134 disable_trace_kprobe(
2135 trace_probe_event_call(&tk->tp), file);
2138 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2139 if (WARN_ONCE(tk == NULL, "error on getting 2nd test probe.")) {
2142 if (WARN_ONCE(trace_kprobe_nhit(tk) != 1,
2143 "incorrect number of testprobe2 hits."))
2146 file = find_trace_probe_file(tk, top_trace_array());
2147 if (WARN_ONCE(file == NULL, "error on getting probe file.")) {
2150 disable_trace_kprobe(
2151 trace_probe_event_call(&tk->tp), file);
2154 ret = create_or_delete_trace_kprobe("-:testprobe");
2155 if (WARN_ONCE(ret, "error on deleting a probe."))
2158 ret = create_or_delete_trace_kprobe("-:testprobe2");
2159 if (WARN_ONCE(ret, "error on deleting a probe."))
2165 * Wait for the optimizer work to finish. Otherwise it might fiddle
2166 * with probes in already freed __init text.
2168 wait_for_kprobe_optimizer();
2170 pr_cont("NG: Some tests are failed. Please check them.\n");
2176 late_initcall(kprobe_trace_self_tests_init);