bcea3f96 1// SPDX-License-Identifier: GPL-2.0
413d37d1 2/*
77b44d1b 3 * Kprobes-based tracing events
4 *
5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
6 *
413d37d1 7 */
72576341 8#define pr_fmt(fmt) "trace_kprobe: " fmt
9
10#include <linux/module.h>
11#include <linux/uaccess.h>
b2d09103 12#include <linux/rculist.h>
540adea3 13#include <linux/error-injection.h>
413d37d1 14
15#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
16
6212dd29 17#include "trace_dynevent.h"
d899926f 18#include "trace_kprobe_selftest.h"
8ab83f56 19#include "trace_probe.h"
53305928 20#include "trace_probe_tmpl.h"
1ff511e3 21
8ab83f56 22#define KPROBE_EVENT_SYSTEM "kprobes"
696ced4f 23#define KRETPROBE_MAXACTIVE_MAX 4096
24#define MAX_KPROBE_CMDLINE_SIZE 1024
25
26/* Kprobe early definition from command line */
27static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
b6399cc7 28static bool kprobe_boot_events_enabled __initdata;
29
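/*
 * Boot-time probe definitions use the same syntax as the kprobe_events
 * interface, except that fields are separated by commas and events by
 * semicolons (setup_boot_kprobe_events() below turns ',' back into ' '
 * and splits on ';').  For illustration only, something like:
 *   kprobe_event=p,vfs_read,$arg1;r,vfs_read,$retval
 */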
30static int __init set_kprobe_boot_events(char *str)
31{
32 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
33 return 0;
34}
35__setup("kprobe_event=", set_kprobe_boot_events);
e09c8614 36
37static int trace_kprobe_create(int argc, const char **argv);
38static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
39static int trace_kprobe_release(struct dyn_event *ev);
40static bool trace_kprobe_is_busy(struct dyn_event *ev);
41static bool trace_kprobe_match(const char *system, const char *event,
30199137 42 int argc, const char **argv, struct dyn_event *ev);
43
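/*
 * Hook trace_kprobe into the generic dynamic event framework so kprobe
 * events can be created, listed and released through the common
 * dyn_event interfaces as well.
 */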
44static struct dyn_event_operations trace_kprobe_ops = {
45 .create = trace_kprobe_create,
46 .show = trace_kprobe_show,
47 .is_busy = trace_kprobe_is_busy,
48 .free = trace_kprobe_release,
49 .match = trace_kprobe_match,
50};
51
cede666e 52/*
77b44d1b 53 * Kprobe event core functions
413d37d1 54 */
c31ffb3f 55struct trace_kprobe {
6212dd29 56 struct dyn_event devent;
4a846b44 57 struct kretprobe rp; /* Use rp.kp for kprobe use */
a7636d9e 58 unsigned long __percpu *nhit;
413d37d1 59 const char *symbol; /* symbol name */
c31ffb3f 60 struct trace_probe tp;
61};
62
63static bool is_trace_kprobe(struct dyn_event *ev)
64{
65 return ev->ops == &trace_kprobe_ops;
66}
67
68static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
69{
70 return container_of(ev, struct trace_kprobe, devent);
71}
72
73/**
74 * for_each_trace_kprobe - iterate over the trace_kprobe list
75 * @pos: the struct trace_kprobe * for each entry
76 * @dpos: the struct dyn_event * to use as a loop cursor
77 */
78#define for_each_trace_kprobe(pos, dpos) \
79 for_each_dyn_event(dpos) \
80 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
81
82#define SIZEOF_TRACE_KPROBE(n) \
83 (offsetof(struct trace_kprobe, tp.args) + \
eca0d916 84 (sizeof(struct probe_arg) * (n)))
a82378d8 85
3da0f180 86static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
413d37d1 87{
c31ffb3f 88 return tk->rp.handler != NULL;
89}
90
3da0f180 91static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
413d37d1 92{
c31ffb3f 93 return tk->symbol ? tk->symbol : "unknown";
94}
95
3da0f180 96static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
61424318 97{
c31ffb3f 98 return tk->rp.kp.offset;
99}
100
3da0f180 101static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
61424318 102{
c31ffb3f 103 return !!(kprobe_gone(&tk->rp.kp));
104}
105
3da0f180 106static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
c31ffb3f 107 struct module *mod)
108{
109 int len = strlen(mod->name);
c31ffb3f 110 const char *name = trace_kprobe_symbol(tk);
111 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
112}
113
59158ec4 114static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
61424318 115{
116 char *p;
117 bool ret;
118
119 if (!tk->symbol)
120 return false;
121 p = strchr(tk->symbol, ':');
122 if (!p)
123 return true;
124 *p = '\0';
125 mutex_lock(&module_mutex);
126 ret = !!find_module(tk->symbol);
127 mutex_unlock(&module_mutex);
128 *p = ':';
129
130 return ret;
131}
132
133static bool trace_kprobe_is_busy(struct dyn_event *ev)
134{
135 struct trace_kprobe *tk = to_trace_kprobe(ev);
136
137 return trace_probe_is_enabled(&tk->tp);
138}
139
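/*
 * Check whether argv matches this probe: argv[0] must match the probed
 * location (symbol+offset or raw address) and any remaining arguments
 * must match the probe's fetch arguments.
 */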
140static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
141 int argc, const char **argv)
142{
143 char buf[MAX_ARGSTR_LEN + 1];
144
145 if (!argc)
146 return true;
147
148 if (!tk->symbol)
149 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
150 else if (tk->rp.kp.offset)
151 snprintf(buf, sizeof(buf), "%s+%u",
152 trace_kprobe_symbol(tk), tk->rp.kp.offset);
153 else
154 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
155 if (strcmp(buf, argv[0]))
156 return false;
157 argc--; argv++;
158
159 return trace_probe_match_command_args(&tk->tp, argc, argv);
160}
161
6212dd29 162static bool trace_kprobe_match(const char *system, const char *event,
30199137 163 int argc, const char **argv, struct dyn_event *ev)
164{
165 struct trace_kprobe *tk = to_trace_kprobe(ev);
166
b55ce203 167 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
168 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
169 trace_kprobe_match_command_head(tk, argc, argv);
170}
171
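/* Sum up the per-CPU hit counters of this probe. */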
172static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
173{
174 unsigned long nhit = 0;
175 int cpu;
176
177 for_each_possible_cpu(cpu)
178 nhit += *per_cpu_ptr(tk->nhit, cpu);
179
180 return nhit;
181}
182
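/*
 * The probe counts as registered once register_kprobe()/register_kretprobe()
 * has linked it into a kprobe list or the kprobe hash table.
 */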
183static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
184{
185 return !(list_empty(&tk->rp.kp.list) &&
186 hlist_unhashed(&tk->rp.kp.hlist));
187}
188
6bc6c77c 189/* Return 0 if it fails to find the symbol address */
190static nokprobe_inline
191unsigned long trace_kprobe_address(struct trace_kprobe *tk)
192{
193 unsigned long addr;
194
195 if (tk->symbol) {
196 addr = (unsigned long)
197 kallsyms_lookup_name(trace_kprobe_symbol(tk));
198 if (addr)
199 addr += tk->rp.kp.offset;
200 } else {
201 addr = (unsigned long)tk->rp.kp.addr;
202 }
203 return addr;
204}
205
206static nokprobe_inline struct trace_kprobe *
207trace_kprobe_primary_from_call(struct trace_event_call *call)
208{
209 struct trace_probe *tp;
210
211 tp = trace_probe_primary_from_call(call);
212 if (WARN_ON_ONCE(!tp))
213 return NULL;
214
215 return container_of(tp, struct trace_kprobe, tp);
216}
217
b4da3340 218bool trace_kprobe_on_func_entry(struct trace_event_call *call)
9802d865 219{
60d53e2c 220 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
b4da3340 221
60d53e2c 222 return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
b4da3340 223 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
60d53e2c 224 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
225}
226
b4da3340 227bool trace_kprobe_error_injectable(struct trace_event_call *call)
9802d865 228{
60d53e2c 229 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
9802d865 230
231 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
232 false;
233}
234
235static int register_kprobe_event(struct trace_kprobe *tk);
236static int unregister_kprobe_event(struct trace_kprobe *tk);
413d37d1 237
238static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
239static int kretprobe_dispatcher(struct kretprobe_instance *ri,
240 struct pt_regs *regs);
241
242static void free_trace_kprobe(struct trace_kprobe *tk)
243{
244 if (tk) {
245 trace_probe_cleanup(&tk->tp);
246 kfree(tk->symbol);
247 free_percpu(tk->nhit);
248 kfree(tk);
249 }
250}
251
252/*
253 * Allocate new trace_probe and initialize it (including kprobes).
254 */
c31ffb3f 255static struct trace_kprobe *alloc_trace_kprobe(const char *group,
f52487e9 256 const char *event,
257 void *addr,
258 const char *symbol,
259 unsigned long offs,
696ced4f 260 int maxactive,
3a6b7666 261 int nargs, bool is_return)
413d37d1 262{
c31ffb3f 263 struct trace_kprobe *tk;
6f3cf440 264 int ret = -ENOMEM;
413d37d1 265
266 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
267 if (!tk)
6f3cf440 268 return ERR_PTR(ret);
413d37d1 269
270 tk->nhit = alloc_percpu(unsigned long);
271 if (!tk->nhit)
272 goto error;
273
413d37d1 274 if (symbol) {
275 tk->symbol = kstrdup(symbol, GFP_KERNEL);
276 if (!tk->symbol)
413d37d1 277 goto error;
278 tk->rp.kp.symbol_name = tk->symbol;
279 tk->rp.kp.offset = offs;
4a846b44 280 } else
c31ffb3f 281 tk->rp.kp.addr = addr;
282
283 if (is_return)
c31ffb3f 284 tk->rp.handler = kretprobe_dispatcher;
4a846b44 285 else
c31ffb3f 286 tk->rp.kp.pre_handler = kprobe_dispatcher;
4a846b44 287
696ced4f 288 tk->rp.maxactive = maxactive;
289 INIT_HLIST_NODE(&tk->rp.kp.hlist);
290 INIT_LIST_HEAD(&tk->rp.kp.list);
696ced4f 291
292 ret = trace_probe_init(&tk->tp, event, group);
293 if (ret < 0)
294 goto error;
295
6212dd29 296 dyn_event_init(&tk->devent, &trace_kprobe_ops);
c31ffb3f 297 return tk;
413d37d1 298error:
455b2899 299 free_trace_kprobe(tk);
6f3cf440 300 return ERR_PTR(ret);
301}
302
303static struct trace_kprobe *find_trace_kprobe(const char *event,
304 const char *group)
413d37d1 305{
6212dd29 306 struct dyn_event *pos;
c31ffb3f 307 struct trace_kprobe *tk;
413d37d1 308
6212dd29 309 for_each_trace_kprobe(tk, pos)
310 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
311 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
c31ffb3f 312 return tk;
313 return NULL;
314}
315
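/* Arm the underlying k*probe, but only if it is registered and not gone. */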
316static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
317{
318 int ret = 0;
319
715fa2fd 320 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
321 if (trace_kprobe_is_return(tk))
322 ret = enable_kretprobe(&tk->rp);
323 else
324 ret = enable_kprobe(&tk->rp.kp);
325 }
326
327 return ret;
328}
329
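/* Disarm every registered k*probe attached to this trace_probe event. */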
330static void __disable_trace_kprobe(struct trace_probe *tp)
331{
332 struct trace_probe *pos;
333 struct trace_kprobe *tk;
334
335 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
336 tk = container_of(pos, struct trace_kprobe, tp);
337 if (!trace_kprobe_is_registered(tk))
338 continue;
339 if (trace_kprobe_is_return(tk))
340 disable_kretprobe(&tk->rp);
341 else
342 disable_kprobe(&tk->rp.kp);
343 }
344}
345
346/*
347 * Enable trace_probe
 348 * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
349 */
350static int enable_trace_kprobe(struct trace_event_call *call,
351 struct trace_event_file *file)
1538f888 352{
353 struct trace_probe *pos, *tp;
354 struct trace_kprobe *tk;
355 bool enabled;
356 int ret = 0;
357
358 tp = trace_probe_primary_from_call(call);
359 if (WARN_ON_ONCE(!tp))
360 return -ENODEV;
361 enabled = trace_probe_is_enabled(tp);
362
363 /* This also changes "enabled" state */
41a7dd42 364 if (file) {
60d53e2c 365 ret = trace_probe_add_file(tp, file);
366 if (ret)
367 return ret;
368 } else
60d53e2c 369 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
41a7dd42 370
371 if (enabled)
372 return 0;
87107a25 373
374 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
375 tk = container_of(pos, struct trace_kprobe, tp);
376 if (trace_kprobe_has_gone(tk))
377 continue;
378 ret = __enable_trace_kprobe(tk);
44d00dc7 379 if (ret)
60d53e2c 380 break;
381 enabled = true;
382 }
383
384 if (ret) {
385 /* Failed to enable one of them. Roll back all */
386 if (enabled)
387 __disable_trace_kprobe(tp);
b5f935ee 388 if (file)
60d53e2c 389 trace_probe_remove_file(tp, file);
b5f935ee 390 else
60d53e2c 391 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
57ea2a34 392 }
b5f935ee 393
394 return ret;
395}
396
397/*
398 * Disable trace_probe
 399 * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
400 */
401static int disable_trace_kprobe(struct trace_event_call *call,
402 struct trace_event_file *file)
1538f888 403{
404 struct trace_probe *tp;
405
406 tp = trace_probe_primary_from_call(call);
407 if (WARN_ON_ONCE(!tp))
408 return -ENODEV;
41a7dd42 409
41a7dd42 410 if (file) {
411 if (!trace_probe_get_file_link(tp, file))
412 return -ENOENT;
413 if (!trace_probe_has_single_file(tp))
b04d52e3 414 goto out;
747774d6 415 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
41a7dd42 416 } else
747774d6 417 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
41a7dd42 418
419 if (!trace_probe_is_enabled(tp))
420 __disable_trace_kprobe(tp);
e12f03d7 421
3fe3d619 422 out:
b5f935ee 423 if (file)
a232e270 424 /*
 425 * Synchronization is done in the function below. For a perf event,
 426 * file == NULL and perf_trace_event_unreg() calls
 427 * tracepoint_synchronize_unregister() to synchronize the
 428 * event, so we don't need to care about it here.
a232e270 429 */
b5f935ee 430 trace_probe_remove_file(tp, file);
a232e270 431
60d53e2c 432 return 0;
433}
434
435#if defined(CONFIG_KPROBES_ON_FTRACE) && \
436 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
437static bool within_notrace_func(struct trace_kprobe *tk)
438{
439 unsigned long offset, size, addr;
440
441 addr = trace_kprobe_address(tk);
442 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
443 return false;
45408c4f 444
445 /* Get the entry address of the target function */
446 addr -= offset;
447
448 /*
449 * Since ftrace_location_range() does inclusive range check, we need
450 * to subtract 1 byte from the end address.
451 */
452 return !ftrace_location_range(addr, addr + size - 1);
453}
454#else
455#define within_notrace_func(tk) (false)
456#endif
457
61424318 458/* Internal register function - just handle k*probes and flags */
c31ffb3f 459static int __register_trace_kprobe(struct trace_kprobe *tk)
413d37d1 460{
a6682814 461 int i, ret;
61424318 462
715fa2fd 463 if (trace_kprobe_is_registered(tk))
464 return -EINVAL;
465
466 if (within_notrace_func(tk)) {
467 pr_warn("Could not probe notrace function %s\n",
468 trace_kprobe_symbol(tk));
469 return -EINVAL;
470 }
471
472 for (i = 0; i < tk->tp.nr_args; i++) {
473 ret = traceprobe_update_arg(&tk->tp.args[i]);
474 if (ret)
475 return ret;
476 }
477
61424318 478 /* Set/clear disabled flag according to tp->flag */
479 if (trace_probe_is_enabled(&tk->tp))
480 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
61424318 481 else
c31ffb3f 482 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
61424318 483
484 if (trace_kprobe_is_return(tk))
485 ret = register_kretprobe(&tk->rp);
413d37d1 486 else
c31ffb3f 487 ret = register_kprobe(&tk->rp.kp);
61424318 488
489 return ret;
490}
491
492/* Internal unregister function - just handle k*probes and flags */
c31ffb3f 493static void __unregister_trace_kprobe(struct trace_kprobe *tk)
61424318 494{
715fa2fd 495 if (trace_kprobe_is_registered(tk)) {
c31ffb3f
NK
496 if (trace_kprobe_is_return(tk))
497 unregister_kretprobe(&tk->rp);
61424318 498 else
c31ffb3f 499 unregister_kprobe(&tk->rp.kp);
500 /* Cleanup kprobe for reuse and mark it unregistered */
501 INIT_HLIST_NODE(&tk->rp.kp.hlist);
502 INIT_LIST_HEAD(&tk->rp.kp.list);
503 if (tk->rp.kp.symbol_name)
504 tk->rp.kp.addr = NULL;
505 }
506}
507
6212dd29 508/* Unregister a trace_probe and probe_event */
c31ffb3f 509static int unregister_trace_kprobe(struct trace_kprobe *tk)
61424318 510{
511 /* If other probes are on the event, just unregister kprobe */
512 if (trace_probe_has_sibling(&tk->tp))
513 goto unreg;
514
02ca1521 515 /* Enabled event can not be unregistered */
c31ffb3f 516 if (trace_probe_is_enabled(&tk->tp))
517 return -EBUSY;
518
40c32592 519 /* Will fail if probe is being used by ftrace or perf */
c31ffb3f 520 if (unregister_kprobe_event(tk))
521 return -EBUSY;
522
ca89bc07 523unreg:
c31ffb3f 524 __unregister_trace_kprobe(tk);
6212dd29 525 dyn_event_remove(&tk->devent);
ca89bc07 526 trace_probe_unlink(&tk->tp);
527
528 return 0;
529}
530
531static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
532 struct trace_kprobe *comp)
533{
534 struct trace_probe_event *tpe = orig->tp.event;
535 struct trace_probe *pos;
536 int i;
537
538 list_for_each_entry(pos, &tpe->probes, list) {
539 orig = container_of(pos, struct trace_kprobe, tp);
540 if (strcmp(trace_kprobe_symbol(orig),
541 trace_kprobe_symbol(comp)) ||
542 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
543 continue;
544
545 /*
546 * trace_probe_compare_arg_type() ensured that nr_args and
 547 * each argument name and type are the same. Let's compare comm.
548 */
549 for (i = 0; i < orig->tp.nr_args; i++) {
550 if (strcmp(orig->tp.args[i].comm,
551 comp->tp.args[i].comm))
552 continue;
553 }
554
555 return true;
556 }
557
558 return false;
559}
560
561static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
562{
563 int ret;
564
565 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
566 if (ret) {
 567 /* Note that arguments start at index 2 */
568 trace_probe_log_set_index(ret + 1);
569 trace_probe_log_err(0, DIFF_ARG_TYPE);
570 return -EEXIST;
571 }
572 if (trace_kprobe_has_same_kprobe(to, tk)) {
573 trace_probe_log_set_index(0);
574 trace_probe_log_err(0, SAME_PROBE);
575 return -EEXIST;
576 }
577
578 /* Append to existing event */
579 ret = trace_probe_append(&tk->tp, &to->tp);
580 if (ret)
581 return ret;
582
583 /* Register k*probe */
584 ret = __register_trace_kprobe(tk);
585 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
586 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
587 ret = 0;
588 }
589
590 if (ret)
591 trace_probe_unlink(&tk->tp);
592 else
593 dyn_event_add(&tk->devent);
594
595 return ret;
596}
597
413d37d1 598/* Register a trace_probe and probe_event */
c31ffb3f 599static int register_trace_kprobe(struct trace_kprobe *tk)
413d37d1 600{
c31ffb3f 601 struct trace_kprobe *old_tk;
602 int ret;
603
6212dd29 604 mutex_lock(&event_mutex);
413d37d1 605
606 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
607 trace_probe_group_name(&tk->tp));
c31ffb3f 608 if (old_tk) {
609 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
610 trace_probe_log_set_index(0);
611 trace_probe_log_err(0, DIFF_PROBE_TYPE);
612 ret = -EEXIST;
613 } else {
fe60b0ce 614 ret = append_trace_kprobe(tk, old_tk);
615 }
616 goto end;
2d5e067e 617 }
618
619 /* Register new event */
c31ffb3f 620 ret = register_kprobe_event(tk);
2d5e067e 621 if (ret) {
a395d6a7 622 pr_warn("Failed to register probe event(%d)\n", ret);
623 goto end;
624 }
625
61424318 626 /* Register k*probe */
c31ffb3f 627 ret = __register_trace_kprobe(tk);
628 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
629 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
630 ret = 0;
631 }
632
61424318 633 if (ret < 0)
c31ffb3f 634 unregister_kprobe_event(tk);
61424318 635 else
6212dd29 636 dyn_event_add(&tk->devent);
61424318 637
413d37d1 638end:
6212dd29 639 mutex_unlock(&event_mutex);
640 return ret;
641}
642
61424318 643/* Module notifier call back, checking event on the module */
c31ffb3f 644static int trace_kprobe_module_callback(struct notifier_block *nb,
645 unsigned long val, void *data)
646{
647 struct module *mod = data;
6212dd29 648 struct dyn_event *pos;
c31ffb3f 649 struct trace_kprobe *tk;
650 int ret;
651
652 if (val != MODULE_STATE_COMING)
653 return NOTIFY_DONE;
654
655 /* Update probes on coming module */
656 mutex_lock(&event_mutex);
657 for_each_trace_kprobe(tk, pos) {
c31ffb3f 658 if (trace_kprobe_within_module(tk, mod)) {
02ca1521 659 /* Don't need to check busy - this should have gone. */
660 __unregister_trace_kprobe(tk);
661 ret = __register_trace_kprobe(tk);
61424318 662 if (ret)
a395d6a7 663 pr_warn("Failed to re-register probe %s on %s: %d\n",
b55ce203 664 trace_probe_name(&tk->tp),
a395d6a7 665 mod->name, ret);
666 }
667 }
6212dd29 668 mutex_unlock(&event_mutex);
669
670 return NOTIFY_DONE;
671}
672
673static struct notifier_block trace_kprobe_module_nb = {
674 .notifier_call = trace_kprobe_module_callback,
675 .priority = 1 /* Invoked after kprobe module callback */
676};
677
678/* Convert certain expected symbols into '_' when generating event names */
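/* (e.g. a name generated from a symbol like "foo.isra.0" becomes "p_foo_isra_0_0") */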
679static inline void sanitize_event_name(char *name)
680{
681 while (*name++ != '\0')
682 if (*name == ':' || *name == '.')
683 *name = '_';
684}
685
6212dd29 686static int trace_kprobe_create(int argc, const char *argv[])
687{
688 /*
689 * Argument syntax:
690 * - Add kprobe:
691 * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
692 * - Add kretprobe:
693 * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
413d37d1 694 * Fetch args:
695 * $retval : fetch return value
696 * $stack : fetch stack address
697 * $stackN : fetch Nth of stack (N:0-)
35abb67d 698 * $comm : fetch current task comm
699 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
700 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
701 * %REG : fetch register REG
93ccae7a 702 * Dereferencing memory fetch:
413d37d1 703 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
704 * Alias name of args:
705 * NAME=FETCHARG : set NAME as alias of FETCHARG.
706 * Type of args:
707 * FETCHARG:TYPE : use TYPE instead of unsigned long.
413d37d1 708 */
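	/*
	 * For example (illustrative only -- the probed function and fetch
	 * args are not taken from this file), a definition written to the
	 * kprobe_events file could look like:
	 *   p:myprobe do_sys_open dfd=$arg1 filename=$arg2
	 *   r:myretprobe do_sys_open $retval
	 */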
ab105a4f 709 struct trace_kprobe *tk = NULL;
710 int i, len, ret = 0;
711 bool is_return = false;
712 char *symbol = NULL, *tmp = NULL;
713 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
696ced4f 714 int maxactive = 0;
c5d343b6 715 long offset = 0;
413d37d1 716 void *addr = NULL;
4a846b44 717 char buf[MAX_EVENT_NAME_LEN];
a1303af5 718 unsigned int flags = TPARG_FL_KERNEL;
413d37d1 719
720 switch (argv[0][0]) {
721 case 'r':
3a6b7666 722 is_return = true;
a1303af5 723 flags |= TPARG_FL_RETURN;
724 break;
725 case 'p':
726 break;
727 default:
728 return -ECANCELED;
729 }
730 if (argc < 2)
6212dd29 731 return -ECANCELED;
413d37d1 732
733 trace_probe_log_init("trace_kprobe", argc, argv);
734
696ced4f 735 event = strchr(&argv[0][1], ':');
6212dd29 736 if (event)
696ced4f 737 event++;
6212dd29 738
739 if (isdigit(argv[0][1])) {
740 if (!is_return) {
741 trace_probe_log_err(1, MAXACT_NO_KPROBE);
742 goto parse_error;
287c038c 743 }
744 if (event)
745 len = event - &argv[0][1] - 1;
746 else
747 len = strlen(&argv[0][1]);
748 if (len > MAX_EVENT_NAME_LEN - 1) {
749 trace_probe_log_err(1, BAD_MAXACT);
750 goto parse_error;
751 }
752 memcpy(buf, &argv[0][1], len);
753 buf[len] = '\0';
754 ret = kstrtouint(buf, 0, &maxactive);
287c038c 755 if (ret || !maxactive) {
756 trace_probe_log_err(1, BAD_MAXACT);
757 goto parse_error;
758 }
 759 /* kretprobe instances are iterated over via a list. The
760 * maximum should stay reasonable.
761 */
762 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
763 trace_probe_log_err(1, MAXACT_TOO_BIG);
764 goto parse_error;
765 }
766 }
767
 768 /* Try to parse an address. If that fails, try to read the
 769 * input as a symbol. */
770 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
ab105a4f 771 trace_probe_log_set_index(1);
6212dd29 772 /* Check whether uprobe event specified */
773 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
774 ret = -ECANCELED;
775 goto error;
776 }
413d37d1 777 /* a symbol specified */
778 symbol = kstrdup(argv[1], GFP_KERNEL);
779 if (!symbol)
780 return -ENOMEM;
413d37d1 781 /* TODO: support .init module functions */
8ab83f56 782 ret = traceprobe_split_symbol_offset(symbol, &offset);
c5d343b6 783 if (ret || offset < 0 || offset > UINT_MAX) {
784 trace_probe_log_err(0, BAD_PROBE_ADDR);
785 goto parse_error;
e63cc239 786 }
787 if (kprobe_on_func_entry(NULL, symbol, offset))
788 flags |= TPARG_FL_FENTRY;
789 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
790 trace_probe_log_err(0, BAD_RETPROBE);
791 goto parse_error;
e63cc239 792 }
793 }
794
ab105a4f 795 trace_probe_log_set_index(0);
6212dd29 796 if (event) {
797 ret = traceprobe_parse_event_name(&event, &group, buf,
798 event - argv[0]);
6212dd29 799 if (ret)
ab105a4f 800 goto parse_error;
6212dd29 801 } else {
4263565d 802 /* Make a new event name */
4263565d 803 if (symbol)
6f3cf440 804 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
805 is_return ? 'r' : 'p', symbol, offset);
806 else
6f3cf440 807 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
4263565d 808 is_return ? 'r' : 'p', addr);
fca18a47 809 sanitize_event_name(buf);
810 event = buf;
811 }
812
813 /* setup a probe */
696ced4f 814 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
ab105a4f 815 argc - 2, is_return);
c31ffb3f 816 if (IS_ERR(tk)) {
6212dd29 817 ret = PTR_ERR(tk);
ab105a4f 818 /* This must return -ENOMEM, else there is a bug */
a039480e 819 WARN_ON_ONCE(ret != -ENOMEM);
ab105a4f 820 goto out; /* We know tk is not allocated */
e63cc239 821 }
ab105a4f 822 argc -= 2; argv += 2;
413d37d1 823
413d37d1 824 /* parse arguments */
a82378d8 825 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
826 tmp = kstrdup(argv[i], GFP_KERNEL);
827 if (!tmp) {
ba8665d7 828 ret = -ENOMEM;
829 goto error;
830 }
da34634f 831
ab105a4f 832 trace_probe_log_set_index(i + 2);
833 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
834 kfree(tmp);
d00bbea9 835 if (ret)
ab105a4f 836 goto error; /* This can be -ENOMEM */
413d37d1 837 }
413d37d1 838
839 ret = traceprobe_set_print_fmt(&tk->tp, is_return);
840 if (ret < 0)
841 goto error;
842
c31ffb3f 843 ret = register_trace_kprobe(tk);
844 if (ret) {
845 trace_probe_log_set_index(1);
846 if (ret == -EILSEQ)
847 trace_probe_log_err(0, BAD_INSN_BNDRY);
848 else if (ret == -ENOENT)
849 trace_probe_log_err(0, BAD_PROBE_ADDR);
ca89bc07 850 else if (ret != -ENOMEM && ret != -EEXIST)
ab105a4f 851 trace_probe_log_err(0, FAIL_REG_PROBE);
413d37d1 852 goto error;
853 }
854
6212dd29 855out:
ab105a4f 856 trace_probe_log_clear();
857 kfree(symbol);
858 return ret;
413d37d1 859
860parse_error:
861 ret = -EINVAL;
413d37d1 862error:
c31ffb3f 863 free_trace_kprobe(tk);
6212dd29 864 goto out;
865}
866
6212dd29 867static int create_or_delete_trace_kprobe(int argc, char **argv)
413d37d1 868{
6212dd29 869 int ret;
02ca1521 870
871 if (argv[0][0] == '-')
872 return dyn_event_release(argc, argv, &trace_kprobe_ops);
413d37d1 873
874 ret = trace_kprobe_create(argc, (const char **)argv);
875 return ret == -ECANCELED ? -EINVAL : ret;
876}
877
6212dd29 878static int trace_kprobe_release(struct dyn_event *ev)
413d37d1 879{
880 struct trace_kprobe *tk = to_trace_kprobe(ev);
881 int ret = unregister_trace_kprobe(tk);
413d37d1 882
883 if (!ret)
884 free_trace_kprobe(tk);
885 return ret;
886}
887
6212dd29 888static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
413d37d1 889{
6212dd29 890 struct trace_kprobe *tk = to_trace_kprobe(ev);
93ccae7a 891 int i;
413d37d1 892
fa6f0cc7 893 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
894 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
895 trace_probe_name(&tk->tp));
413d37d1 896
897 if (!tk->symbol)
898 seq_printf(m, " 0x%p", tk->rp.kp.addr);
899 else if (tk->rp.kp.offset)
900 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
901 tk->rp.kp.offset);
413d37d1 902 else
c31ffb3f 903 seq_printf(m, " %s", trace_kprobe_symbol(tk));
413d37d1 904
905 for (i = 0; i < tk->tp.nr_args; i++)
906 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
fa6f0cc7 907 seq_putc(m, '\n');
93ccae7a 908
909 return 0;
910}
911
912static int probes_seq_show(struct seq_file *m, void *v)
913{
914 struct dyn_event *ev = v;
915
916 if (!is_trace_kprobe(ev))
917 return 0;
918
919 return trace_kprobe_show(m, ev);
920}
921
413d37d1 922static const struct seq_operations probes_seq_op = {
923 .start = dyn_event_seq_start,
924 .next = dyn_event_seq_next,
925 .stop = dyn_event_seq_stop,
926 .show = probes_seq_show
927};
928
929static int probes_open(struct inode *inode, struct file *file)
930{
931 int ret;
932
933 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
6212dd29 934 ret = dyn_events_release_all(&trace_kprobe_ops);
935 if (ret < 0)
936 return ret;
937 }
938
939 return seq_open(file, &probes_seq_op);
940}
941
942static ssize_t probes_write(struct file *file, const char __user *buffer,
943 size_t count, loff_t *ppos)
944{
7e465baa 945 return trace_parse_run_command(file, buffer, count, ppos,
6212dd29 946 create_or_delete_trace_kprobe);
947}
948
949static const struct file_operations kprobe_events_ops = {
950 .owner = THIS_MODULE,
951 .open = probes_open,
952 .read = seq_read,
953 .llseek = seq_lseek,
954 .release = seq_release,
955 .write = probes_write,
956};
957
958/* Probes profiling interfaces */
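/*
 * Each kprobe_profile line shows the event name, the hit count and the
 * missed count, e.g. (event name illustrative only):
 *   p_do_sys_open_0                                    15               0
 */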
959static int probes_profile_seq_show(struct seq_file *m, void *v)
960{
961 struct dyn_event *ev = v;
962 struct trace_kprobe *tk;
cd7e7bd5 963
964 if (!is_trace_kprobe(ev))
965 return 0;
cd7e7bd5 966
6212dd29 967 tk = to_trace_kprobe(ev);
de7b2973 968 seq_printf(m, " %-44s %15lu %15lu\n",
b55ce203 969 trace_probe_name(&tk->tp),
f18f97ac 970 trace_kprobe_nhit(tk),
c31ffb3f 971 tk->rp.kp.nmissed);
972
973 return 0;
974}
975
976static const struct seq_operations profile_seq_op = {
977 .start = dyn_event_seq_start,
978 .next = dyn_event_seq_next,
979 .stop = dyn_event_seq_stop,
980 .show = probes_profile_seq_show
981};
982
983static int profile_open(struct inode *inode, struct file *file)
984{
985 return seq_open(file, &profile_seq_op);
986}
987
988static const struct file_operations kprobe_profile_ops = {
989 .owner = THIS_MODULE,
990 .open = profile_open,
991 .read = seq_read,
992 .llseek = seq_lseek,
993 .release = seq_release,
994};
995
996/* Kprobe specific fetch functions */
997
 998/* Return the length of the string -- including the null terminating byte */
999static nokprobe_inline int
1000fetch_store_strlen(unsigned long addr)
53305928 1001{
1002 int ret, len = 0;
1003 u8 c;
1004
53305928 1005 do {
49ef5f45 1006 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
1007 len++;
1008 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1009
9178412d 1010 return (ret < 0) ? ret : len;
1011}
1012
 1013/* Return the length of the string -- including the null terminating byte */
1014static nokprobe_inline int
1015fetch_store_strlen_user(unsigned long addr)
1016{
1017 const void __user *uaddr = (__force const void __user *)addr;
1018
1019 return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
1020}
1021
1022/*
1023 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1024 * length and relative data location.
1025 */
1026static nokprobe_inline int
1027fetch_store_string(unsigned long addr, void *dest, void *base)
53305928 1028{
9178412d 1029 int maxlen = get_loc_len(*(u32 *)dest);
88903c46 1030 void *__dest;
1031 long ret;
1032
1033 if (unlikely(!maxlen))
1034 return -ENOMEM;
1035
1036 __dest = get_loc_data(dest, base);
1037
1038 /*
1039 * Try to get string again, since the string can be changed while
1040 * probing.
1041 */
1042 ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
1043 if (ret >= 0)
1044 *(u32 *)dest = make_data_loc(ret, __dest - base);
1045
1046 return ret;
1047}
53305928 1048
1049/*
1050 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1051 * with max length and relative data location.
1052 */
1053static nokprobe_inline int
1054fetch_store_string_user(unsigned long addr, void *dest, void *base)
1055{
1056 const void __user *uaddr = (__force const void __user *)addr;
1057 int maxlen = get_loc_len(*(u32 *)dest);
1058 void *__dest;
1059 long ret;
1060
1061 if (unlikely(!maxlen))
1062 return -ENOMEM;
1063
1064 __dest = get_loc_data(dest, base);
1065
1066 ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
9178412d 1067 if (ret >= 0)
1068 *(u32 *)dest = make_data_loc(ret, __dest - base);
1069
9178412d 1070 return ret;
1071}
1072
1073static nokprobe_inline int
1074probe_mem_read(void *dest, void *src, size_t size)
1075{
1076 return probe_kernel_read(dest, src, size);
1077}
1078
1079static nokprobe_inline int
1080probe_mem_read_user(void *dest, void *src, size_t size)
1081{
1082 const void __user *uaddr = (__force const void __user *)src;
1083
1084 return probe_user_read(dest, uaddr, size);
1085}
1086
1087/* Note that we don't verify it, since the code does not come from user space */
1088static int
1089process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
9178412d 1090 void *base)
1091{
1092 unsigned long val;
53305928 1093
a6682814 1094retry:
1095 /* 1st stage: get value from context */
1096 switch (code->op) {
1097 case FETCH_OP_REG:
1098 val = regs_get_register(regs, code->param);
1099 break;
1100 case FETCH_OP_STACK:
1101 val = regs_get_kernel_stack_nth(regs, code->param);
1102 break;
1103 case FETCH_OP_STACKP:
1104 val = kernel_stack_pointer(regs);
1105 break;
1106 case FETCH_OP_RETVAL:
1107 val = regs_return_value(regs);
1108 break;
1109 case FETCH_OP_IMM:
1110 val = code->immediate;
1111 break;
1112 case FETCH_OP_COMM:
1113 val = (unsigned long)current->comm;
1114 break;
1115 case FETCH_OP_DATA:
1116 val = (unsigned long)code->data;
1117 break;
1118#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1119 case FETCH_OP_ARG:
1120 val = regs_get_kernel_argument(regs, code->param);
1121 break;
1122#endif
1123 case FETCH_NOP_SYMBOL: /* Ignore a place holder */
1124 code++;
1125 goto retry;
1126 default:
1127 return -EILSEQ;
1128 }
1129 code++;
1130
9b960a38 1131 return process_fetch_insn_bottom(code, val, dest, base);
1132}
1133NOKPROBE_SYMBOL(process_fetch_insn)
1134
413d37d1 1135/* Kprobe handler */
3da0f180 1136static nokprobe_inline void
c31ffb3f 1137__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
7f1d2f82 1138 struct trace_event_file *trace_file)
413d37d1 1139{
93ccae7a 1140 struct kprobe_trace_entry_head *entry;
413d37d1 1141 struct ring_buffer_event *event;
8f8ffe24 1142 struct ring_buffer *buffer;
e09c8614 1143 int size, dsize, pc;
413d37d1 1144 unsigned long irq_flags;
e3dc9f89 1145 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
413d37d1 1146
7f1d2f82 1147 WARN_ON(call != trace_file->event_call);
41a7dd42 1148
09a5059a 1149 if (trace_trigger_soft_disabled(trace_file))
13a1e4ae 1150 return;
b8820084 1151
1152 local_save_flags(irq_flags);
1153 pc = preempt_count();
1154
1155 dsize = __get_data_size(&tk->tp, regs);
1156 size = sizeof(*entry) + tk->tp.size + dsize;
413d37d1 1157
7f1d2f82 1158 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1159 call->event.type,
1160 size, irq_flags, pc);
413d37d1 1161 if (!event)
1e12a4a7 1162 return;
1163
1164 entry = ring_buffer_event_data(event);
c31ffb3f 1165 entry->ip = (unsigned long)tk->rp.kp.addr;
9178412d 1166 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
413d37d1 1167
7f1d2f82 1168 event_trigger_unlock_commit_regs(trace_file, buffer, event,
13a1e4ae 1169 entry, irq_flags, pc, regs);
1170}
1171
3da0f180 1172static void
c31ffb3f 1173kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
41a7dd42 1174{
b04d52e3 1175 struct event_file_link *link;
41a7dd42 1176
b5f935ee 1177 trace_probe_for_each_link_rcu(link, &tk->tp)
c31ffb3f 1178 __kprobe_trace_func(tk, regs, link->file);
41a7dd42 1179}
3da0f180 1180NOKPROBE_SYMBOL(kprobe_trace_func);
41a7dd42 1181
413d37d1 1182/* Kretprobe handler */
3da0f180 1183static nokprobe_inline void
c31ffb3f 1184__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
41a7dd42 1185 struct pt_regs *regs,
7f1d2f82 1186 struct trace_event_file *trace_file)
413d37d1 1187{
93ccae7a 1188 struct kretprobe_trace_entry_head *entry;
413d37d1 1189 struct ring_buffer_event *event;
8f8ffe24 1190 struct ring_buffer *buffer;
e09c8614 1191 int size, pc, dsize;
413d37d1 1192 unsigned long irq_flags;
e3dc9f89 1193 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
413d37d1 1194
7f1d2f82 1195 WARN_ON(call != trace_file->event_call);
41a7dd42 1196
09a5059a 1197 if (trace_trigger_soft_disabled(trace_file))
13a1e4ae 1198 return;
b8820084 1199
1200 local_save_flags(irq_flags);
1201 pc = preempt_count();
1202
1203 dsize = __get_data_size(&tk->tp, regs);
1204 size = sizeof(*entry) + tk->tp.size + dsize;
413d37d1 1205
7f1d2f82 1206 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1207 call->event.type,
1208 size, irq_flags, pc);
413d37d1 1209 if (!event)
1e12a4a7 1210 return;
1211
1212 entry = ring_buffer_event_data(event);
c31ffb3f 1213 entry->func = (unsigned long)tk->rp.kp.addr;
413d37d1 1214 entry->ret_ip = (unsigned long)ri->ret_addr;
9178412d 1215 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
413d37d1 1216
7f1d2f82 1217 event_trigger_unlock_commit_regs(trace_file, buffer, event,
13a1e4ae 1218 entry, irq_flags, pc, regs);
1219}
1220
3da0f180 1221static void
c31ffb3f 1222kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1223 struct pt_regs *regs)
1224{
b04d52e3 1225 struct event_file_link *link;
41a7dd42 1226
b5f935ee 1227 trace_probe_for_each_link_rcu(link, &tk->tp)
c31ffb3f 1228 __kretprobe_trace_func(tk, ri, regs, link->file);
41a7dd42 1229}
3da0f180 1230NOKPROBE_SYMBOL(kretprobe_trace_func);
41a7dd42 1231
413d37d1 1232/* Event entry printers */
b62fdd97 1233static enum print_line_t
1234print_kprobe_event(struct trace_iterator *iter, int flags,
1235 struct trace_event *event)
413d37d1 1236{
93ccae7a 1237 struct kprobe_trace_entry_head *field;
413d37d1 1238 struct trace_seq *s = &iter->seq;
eca0d916 1239 struct trace_probe *tp;
413d37d1 1240
93ccae7a 1241 field = (struct kprobe_trace_entry_head *)iter->ent;
1242 tp = trace_probe_primary_from_call(
1243 container_of(event, struct trace_event_call, event));
1244 if (WARN_ON_ONCE(!tp))
1245 goto out;
413d37d1 1246
b55ce203 1247 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
6e9f23d1 1248
413d37d1 1249 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
85224da0 1250 goto out;
413d37d1 1251
85224da0 1252 trace_seq_putc(s, ')');
413d37d1 1253
1254 if (print_probe_args(s, tp->args, tp->nr_args,
1255 (u8 *)&field[1], field) < 0)
1256 goto out;
413d37d1 1257
1258 trace_seq_putc(s, '\n');
1259 out:
1260 return trace_handle_return(s);
1261}
1262
b62fdd97 1263static enum print_line_t
1264print_kretprobe_event(struct trace_iterator *iter, int flags,
1265 struct trace_event *event)
413d37d1 1266{
93ccae7a 1267 struct kretprobe_trace_entry_head *field;
413d37d1 1268 struct trace_seq *s = &iter->seq;
eca0d916 1269 struct trace_probe *tp;
413d37d1 1270
93ccae7a 1271 field = (struct kretprobe_trace_entry_head *)iter->ent;
1272 tp = trace_probe_primary_from_call(
1273 container_of(event, struct trace_event_call, event));
1274 if (WARN_ON_ONCE(!tp))
1275 goto out;
413d37d1 1276
b55ce203 1277 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
6e9f23d1 1278
413d37d1 1279 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
85224da0 1280 goto out;
413d37d1 1281
85224da0 1282 trace_seq_puts(s, " <- ");
1283
1284 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
85224da0 1285 goto out;
413d37d1 1286
85224da0 1287 trace_seq_putc(s, ')');
413d37d1 1288
1289 if (print_probe_args(s, tp->args, tp->nr_args,
1290 (u8 *)&field[1], field) < 0)
1291 goto out;
413d37d1 1292
85224da0 1293 trace_seq_putc(s, '\n');
413d37d1 1294
1295 out:
1296 return trace_handle_return(s);
1297}
1298
413d37d1 1299
2425bcb9 1300static int kprobe_event_define_fields(struct trace_event_call *event_call)
413d37d1 1301{
eeb07b06 1302 int ret;
93ccae7a 1303 struct kprobe_trace_entry_head field;
1304 struct trace_probe *tp;
1305
1306 tp = trace_probe_primary_from_call(event_call);
1307 if (WARN_ON_ONCE(!tp))
1308 return -ENOENT;
413d37d1 1309
a703d946 1310 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
c31ffb3f 1311
60d53e2c 1312 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1313}
1314
2425bcb9 1315static int kretprobe_event_define_fields(struct trace_event_call *event_call)
413d37d1 1316{
eeb07b06 1317 int ret;
93ccae7a 1318 struct kretprobe_trace_entry_head field;
1319 struct trace_probe *tp;
1320
1321 tp = trace_probe_primary_from_call(event_call);
1322 if (WARN_ON_ONCE(!tp))
1323 return -ENOENT;
413d37d1 1324
1325 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1326 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
c31ffb3f 1327
60d53e2c 1328 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1329}
1330
07b139c8 1331#ifdef CONFIG_PERF_EVENTS
1332
1333/* Kprobe profile handler */
9802d865 1334static int
c31ffb3f 1335kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
e08d1c65 1336{
e3dc9f89 1337 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
93ccae7a 1338 struct kprobe_trace_entry_head *entry;
1c024eca 1339 struct hlist_head *head;
e09c8614 1340 int size, __size, dsize;
4ed7c92d 1341 int rctx;
e08d1c65 1342
9802d865 1343 if (bpf_prog_array_valid(call)) {
66665ad2 1344 unsigned long orig_ip = instruction_pointer(regs);
1345 int ret;
1346
1347 ret = trace_call_bpf(call, regs);
1348
1349 /*
1350 * We need to check and see if we modified the pc of the
cce188bd
MH
1351 * pt_regs, and if so return 1 so that we don't do the
1352 * single stepping.
9802d865 1353 */
cce188bd 1354 if (orig_ip != instruction_pointer(regs))
9802d865 1355 return 1;
1356 if (!ret)
1357 return 0;
1358 }
2541517c 1359
1360 head = this_cpu_ptr(call->perf_events);
1361 if (hlist_empty(head))
9802d865 1362 return 0;
288e984e 1363
1364 dsize = __get_data_size(&tk->tp, regs);
1365 __size = sizeof(*entry) + tk->tp.size + dsize;
1366 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1367 size -= sizeof(u32);
ce71b9df 1368
1e1dcd93 1369 entry = perf_trace_buf_alloc(size, NULL, &rctx);
430ad5a6 1370 if (!entry)
9802d865 1371 return 0;
a1a138d0 1372
c31ffb3f 1373 entry->ip = (unsigned long)tk->rp.kp.addr;
e09c8614 1374 memset(&entry[1], 0, dsize);
9178412d 1375 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1e1dcd93 1376 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
8fd0fbbe 1377 head, NULL);
9802d865 1378 return 0;
e08d1c65 1379}
3da0f180 1380NOKPROBE_SYMBOL(kprobe_perf_func);
1381
1382/* Kretprobe profile handler */
3da0f180 1383static void
c31ffb3f 1384kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
2b106aab 1385 struct pt_regs *regs)
e08d1c65 1386{
e3dc9f89 1387 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
93ccae7a 1388 struct kretprobe_trace_entry_head *entry;
1c024eca 1389 struct hlist_head *head;
e09c8614 1390 int size, __size, dsize;
4ed7c92d 1391 int rctx;
e08d1c65 1392
e87c6bc3 1393 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1394 return;
1395
1396 head = this_cpu_ptr(call->perf_events);
1397 if (hlist_empty(head))
1398 return;
1399
1400 dsize = __get_data_size(&tk->tp, regs);
1401 __size = sizeof(*entry) + tk->tp.size + dsize;
1402 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1403 size -= sizeof(u32);
444a2a3b 1404
1e1dcd93 1405 entry = perf_trace_buf_alloc(size, NULL, &rctx);
430ad5a6 1406 if (!entry)
1e12a4a7 1407 return;
e08d1c65 1408
c31ffb3f 1409 entry->func = (unsigned long)tk->rp.kp.addr;
a1a138d0 1410 entry->ret_ip = (unsigned long)ri->ret_addr;
9178412d 1411 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1e1dcd93 1412 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
8fd0fbbe 1413 head, NULL);
e08d1c65 1414}
3da0f180 1415NOKPROBE_SYMBOL(kretprobe_perf_func);
1416
1417int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1418 const char **symbol, u64 *probe_offset,
1419 u64 *probe_addr, bool perf_type_tracepoint)
1420{
1421 const char *pevent = trace_event_name(event->tp_event);
1422 const char *group = event->tp_event->class->system;
1423 struct trace_kprobe *tk;
1424
1425 if (perf_type_tracepoint)
1426 tk = find_trace_kprobe(pevent, group);
1427 else
1428 tk = event->tp_event->data;
1429 if (!tk)
1430 return -EINVAL;
1431
1432 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1433 : BPF_FD_TYPE_KPROBE;
1434 if (tk->symbol) {
1435 *symbol = tk->symbol;
1436 *probe_offset = tk->rp.kp.offset;
1437 *probe_addr = 0;
1438 } else {
1439 *symbol = NULL;
1440 *probe_offset = 0;
1441 *probe_addr = (unsigned long)tk->rp.kp.addr;
1442 }
1443 return 0;
1444}
07b139c8 1445#endif /* CONFIG_PERF_EVENTS */
50d78056 1446
1447/*
1448 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1449 *
1450 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1451 * lockless, but we can't race with this __init function.
1452 */
2425bcb9 1453static int kprobe_register(struct trace_event_call *event,
fbc1963d 1454 enum trace_reg type, void *data)
2239291a 1455{
7f1d2f82 1456 struct trace_event_file *file = data;
1538f888 1457
1458 switch (type) {
1459 case TRACE_REG_REGISTER:
60d53e2c 1460 return enable_trace_kprobe(event, file);
2239291a 1461 case TRACE_REG_UNREGISTER:
60d53e2c 1462 return disable_trace_kprobe(event, file);
1463
1464#ifdef CONFIG_PERF_EVENTS
1465 case TRACE_REG_PERF_REGISTER:
60d53e2c 1466 return enable_trace_kprobe(event, NULL);
2239291a 1467 case TRACE_REG_PERF_UNREGISTER:
60d53e2c 1468 return disable_trace_kprobe(event, NULL);
1469 case TRACE_REG_PERF_OPEN:
1470 case TRACE_REG_PERF_CLOSE:
1471 case TRACE_REG_PERF_ADD:
1472 case TRACE_REG_PERF_DEL:
ceec0b6f 1473 return 0;
1474#endif
1475 }
1476 return 0;
1477}
50d78056 1478
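/*
 * Probe hit entry point: count the hit and fan out to the ftrace and/or
 * perf handlers depending on which TP_FLAG_* flags are set.
 */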
3da0f180 1479static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
50d78056 1480{
c31ffb3f 1481 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
9802d865 1482 int ret = 0;
e08d1c65 1483
a7636d9e 1484 raw_cpu_inc(*tk->nhit);
48182bd2 1485
747774d6 1486 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
c31ffb3f 1487 kprobe_trace_func(tk, regs);
07b139c8 1488#ifdef CONFIG_PERF_EVENTS
747774d6 1489 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
9802d865 1490 ret = kprobe_perf_func(tk, regs);
07b139c8 1491#endif
9802d865 1492 return ret;
50d78056 1493}
3da0f180 1494NOKPROBE_SYMBOL(kprobe_dispatcher);
50d78056 1495
1496static int
1497kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
50d78056 1498{
c31ffb3f 1499 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
50d78056 1500
a7636d9e 1501 raw_cpu_inc(*tk->nhit);
48182bd2 1502
747774d6 1503 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
c31ffb3f 1504 kretprobe_trace_func(tk, ri, regs);
07b139c8 1505#ifdef CONFIG_PERF_EVENTS
747774d6 1506 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
c31ffb3f 1507 kretprobe_perf_func(tk, ri, regs);
07b139c8 1508#endif
 1509	return 0;	/* We don't tweak the kernel, so just return 0 */
1510}
3da0f180 1511NOKPROBE_SYMBOL(kretprobe_dispatcher);
e08d1c65 1512
1513static struct trace_event_functions kretprobe_funcs = {
1514 .trace = print_kretprobe_event
1515};
1516
1517static struct trace_event_functions kprobe_funcs = {
1518 .trace = print_kprobe_event
1519};
1520
e3dc9f89 1521static inline void init_trace_event_call(struct trace_kprobe *tk)
413d37d1 1522{
1523 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1524
c31ffb3f 1525 if (trace_kprobe_is_return(tk)) {
80decc70 1526 call->event.funcs = &kretprobe_funcs;
2e33af02 1527 call->class->define_fields = kretprobe_event_define_fields;
413d37d1 1528 } else {
80decc70 1529 call->event.funcs = &kprobe_funcs;
2e33af02 1530 call->class->define_fields = kprobe_event_define_fields;
413d37d1 1531 }
1532
1533 call->flags = TRACE_EVENT_FL_KPROBE;
1534 call->class->reg = kprobe_register;
1535}
1536
1537static int register_kprobe_event(struct trace_kprobe *tk)
1538{
e3dc9f89 1539 init_trace_event_call(tk);
f730e0f2 1540
46e5376d 1541 return trace_probe_register_event_call(&tk->tp);
1542}
1543
c31ffb3f 1544static int unregister_kprobe_event(struct trace_kprobe *tk)
413d37d1 1545{
46e5376d 1546 return trace_probe_unregister_event_call(&tk->tp);
1547}
1548
1549#ifdef CONFIG_PERF_EVENTS
1550/* create a trace_kprobe, but don't add it to global lists */
1551struct trace_event_call *
1552create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1553 bool is_return)
1554{
1555 struct trace_kprobe *tk;
1556 int ret;
1557 char *event;
1558
1559 /*
6212dd29 1560 * local trace_kprobes are not added to dyn_event, so they are never
1561 * searched in find_trace_kprobe(). Therefore, there is no concern of
1562 * duplicated name here.
1563 */
1564 event = func ? func : "DUMMY_EVENT";
1565
1566 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1567 offs, 0 /* maxactive */, 0 /* nargs */,
1568 is_return);
1569
1570 if (IS_ERR(tk)) {
1571 pr_info("Failed to allocate trace_probe.(%d)\n",
1572 (int)PTR_ERR(tk));
1573 return ERR_CAST(tk);
1574 }
1575
e3dc9f89 1576 init_trace_event_call(tk);
e12f03d7 1577
0a46c854 1578 if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1579 ret = -ENOMEM;
1580 goto error;
1581 }
1582
1583 ret = __register_trace_kprobe(tk);
f730e0f2 1584 if (ret < 0)
1585 goto error;
1586
e3dc9f89 1587 return trace_probe_event_call(&tk->tp);
1588error:
1589 free_trace_kprobe(tk);
1590 return ERR_PTR(ret);
1591}
1592
1593void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1594{
1595 struct trace_kprobe *tk;
1596
1597 tk = trace_kprobe_primary_from_call(event_call);
1598 if (unlikely(!tk))
1599 return;
1600
1601 if (trace_probe_is_enabled(&tk->tp)) {
1602 WARN_ON(1);
1603 return;
1604 }
1605
1606 __unregister_trace_kprobe(tk);
0fc8c358 1607
1608 free_trace_kprobe(tk);
1609}
1610#endif /* CONFIG_PERF_EVENTS */
1611
1612static __init void enable_boot_kprobe_events(void)
1613{
1614 struct trace_array *tr = top_trace_array();
1615 struct trace_event_file *file;
1616 struct trace_kprobe *tk;
1617 struct dyn_event *pos;
1618
1619 mutex_lock(&event_mutex);
1620 for_each_trace_kprobe(tk, pos) {
1621 list_for_each_entry(file, &tr->events, list)
e3dc9f89 1622 if (file->event_call == trace_probe_event_call(&tk->tp))
970988e1
MH
1623 trace_event_enable_disable(file, 1, 0);
1624 }
1625 mutex_unlock(&event_mutex);
1626}
1627
1628static __init void setup_boot_kprobe_events(void)
1629{
1630 char *p, *cmd = kprobe_boot_events_buf;
1631 int ret;
1632
1633 strreplace(kprobe_boot_events_buf, ',', ' ');
1634
1635 while (cmd && *cmd != '\0') {
1636 p = strchr(cmd, ';');
1637 if (p)
1638 *p++ = '\0';
1639
1640 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1641 if (ret)
1642 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1643 else
1644 kprobe_boot_events_enabled = true;
1645
1646 cmd = p;
1647 }
1648
1649 enable_boot_kprobe_events();
1650}
1651
8434dc93 1652/* Make a tracefs interface for controlling probe points */
1653static __init int init_kprobe_trace(void)
1654{
1655 struct dentry *d_tracer;
1656 struct dentry *entry;
1657 int ret;
1658
1659 ret = dyn_event_register(&trace_kprobe_ops);
1660 if (ret)
1661 return ret;
413d37d1 1662
c31ffb3f 1663 if (register_module_notifier(&trace_kprobe_module_nb))
1664 return -EINVAL;
1665
413d37d1 1666 d_tracer = tracing_init_dentry();
14a5ae40 1667 if (IS_ERR(d_tracer))
1668 return 0;
1669
8434dc93 1670 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1671 NULL, &kprobe_events_ops);
1672
cd7e7bd5 1673 /* Event list interface */
413d37d1 1674 if (!entry)
a395d6a7 1675 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1676
1677 /* Profile interface */
8434dc93 1678 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1679 NULL, &kprobe_profile_ops);
1680
1681 if (!entry)
a395d6a7 1682 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1683
1684 setup_boot_kprobe_events();
1685
1686 return 0;
1687}
1688fs_initcall(init_kprobe_trace);
1689
1690
1691#ifdef CONFIG_FTRACE_STARTUP_TEST
26a346f2 1692static __init struct trace_event_file *
c31ffb3f 1693find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
41a7dd42 1694{
7f1d2f82 1695 struct trace_event_file *file;
1696
1697 list_for_each_entry(file, &tr->events, list)
e3dc9f89 1698 if (file->event_call == trace_probe_event_call(&tk->tp))
1699 return file;
1700
1701 return NULL;
1702}
1703
3fe3d619 1704/*
c31ffb3f 1705 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1706 * stage, we can do this lockless.
1707 */
1708static __init int kprobe_trace_self_tests_init(void)
1709{
231e36f4 1710 int ret, warn = 0;
413d37d1 1711 int (*target)(int, int, int, int, int, int);
c31ffb3f 1712 struct trace_kprobe *tk;
7f1d2f82 1713 struct trace_event_file *file;
413d37d1 1714
1715 if (tracing_is_disabled())
1716 return -ENODEV;
1717
1718 if (kprobe_boot_events_enabled) {
1719 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1720 return 0;
1721 }
1722
1723 target = kprobe_trace_selftest_target;
1724
1725 pr_info("Testing kprobe tracing: ");
1726
1727 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1728 create_or_delete_trace_kprobe);
231e36f4 1729 if (WARN_ON_ONCE(ret)) {
41a7dd42 1730 pr_warn("error on probing function entry.\n");
1731 warn++;
1732 } else {
1733 /* Enable trace point */
1734 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1735 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1736 pr_warn("error on getting new probe.\n");
231e36f4 1737 warn++;
41a7dd42 1738 } else {
c31ffb3f 1739 file = find_trace_probe_file(tk, top_trace_array());
1740 if (WARN_ON_ONCE(file == NULL)) {
1741 pr_warn("error on getting probe file.\n");
1742 warn++;
1743 } else
1744 enable_trace_kprobe(
1745 trace_probe_event_call(&tk->tp), file);
41a7dd42 1746 }
231e36f4 1747 }
413d37d1 1748
1749 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1750 create_or_delete_trace_kprobe);
231e36f4 1751 if (WARN_ON_ONCE(ret)) {
41a7dd42 1752 pr_warn("error on probing function return.\n");
1753 warn++;
1754 } else {
1755 /* Enable trace point */
1756 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1757 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1758 pr_warn("error on getting 2nd new probe.\n");
231e36f4 1759 warn++;
41a7dd42 1760 } else {
c31ffb3f 1761 file = find_trace_probe_file(tk, top_trace_array());
1762 if (WARN_ON_ONCE(file == NULL)) {
1763 pr_warn("error on getting probe file.\n");
1764 warn++;
1765 } else
1766 enable_trace_kprobe(
1767 trace_probe_event_call(&tk->tp), file);
41a7dd42 1768 }
1769 }
1770
1771 if (warn)
1772 goto end;
1773
1774 ret = target(1, 2, 3, 4, 5, 6);
1775
1776 /*
1777 * Not expecting an error here, the check is only to prevent the
1778 * optimizer from removing the call to target() as otherwise there
1779 * are no side-effects and the call is never performed.
1780 */
1781 if (ret != 21)
1782 warn++;
1783
02ca1521 1784 /* Disable trace points before removing it */
1785 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1786 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1787 pr_warn("error on getting test probe.\n");
02ca1521 1788 warn++;
41a7dd42 1789 } else {
1790 if (trace_kprobe_nhit(tk) != 1) {
1791 pr_warn("incorrect number of testprobe hits\n");
1792 warn++;
1793 }
1794
c31ffb3f 1795 file = find_trace_probe_file(tk, top_trace_array());
1796 if (WARN_ON_ONCE(file == NULL)) {
1797 pr_warn("error on getting probe file.\n");
1798 warn++;
1799 } else
1800 disable_trace_kprobe(
1801 trace_probe_event_call(&tk->tp), file);
41a7dd42 1802 }
02ca1521 1803
1804 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1805 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1806 pr_warn("error on getting 2nd test probe.\n");
02ca1521 1807 warn++;
41a7dd42 1808 } else {
1809 if (trace_kprobe_nhit(tk) != 1) {
1810 pr_warn("incorrect number of testprobe2 hits\n");
1811 warn++;
1812 }
1813
c31ffb3f 1814 file = find_trace_probe_file(tk, top_trace_array());
1815 if (WARN_ON_ONCE(file == NULL)) {
1816 pr_warn("error on getting probe file.\n");
1817 warn++;
1818 } else
1819 disable_trace_kprobe(
1820 trace_probe_event_call(&tk->tp), file);
41a7dd42 1821 }
02ca1521 1822
6212dd29 1823 ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
231e36f4 1824 if (WARN_ON_ONCE(ret)) {
41a7dd42 1825 pr_warn("error on deleting a probe.\n");
1826 warn++;
1827 }
1828
6212dd29 1829 ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
231e36f4 1830 if (WARN_ON_ONCE(ret)) {
41a7dd42 1831 pr_warn("error on deleting a probe.\n");
1832 warn++;
1833 }
413d37d1 1834
231e36f4 1835end:
1836 ret = dyn_events_release_all(&trace_kprobe_ops);
1837 if (WARN_ON_ONCE(ret)) {
1838 pr_warn("error on cleaning up probes.\n");
1839 warn++;
1840 }
1841 /*
1842 * Wait for the optimizer work to finish. Otherwise it might fiddle
1843 * with probes in already freed __init text.
1844 */
1845 wait_for_kprobe_optimizer();
1846 if (warn)
1847 pr_cont("NG: Some tests are failed. Please check them.\n");
1848 else
1849 pr_cont("OK\n");
1850 return 0;
1851}
1852
1853late_initcall(kprobe_trace_self_tests_init);
1854
1855#endif