tracing/probe: Add trace_event_call register API for trace_probe
[linux-2.6-block.git] / kernel / trace / trace_kprobe.c
CommitLineData
bcea3f96 1// SPDX-License-Identifier: GPL-2.0
413d37d1 2/*
77b44d1b 3 * Kprobes-based tracing events
413d37d1
MH
4 *
5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
6 *
413d37d1 7 */
72576341 8#define pr_fmt(fmt) "trace_kprobe: " fmt
413d37d1
MH
9
10#include <linux/module.h>
11#include <linux/uaccess.h>
b2d09103 12#include <linux/rculist.h>
540adea3 13#include <linux/error-injection.h>
413d37d1 14
970988e1
MH
15#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
16
6212dd29 17#include "trace_dynevent.h"
d899926f 18#include "trace_kprobe_selftest.h"
8ab83f56 19#include "trace_probe.h"
53305928 20#include "trace_probe_tmpl.h"
1ff511e3 21
8ab83f56 22#define KPROBE_EVENT_SYSTEM "kprobes"
696ced4f 23#define KRETPROBE_MAXACTIVE_MAX 4096
970988e1
MH
24#define MAX_KPROBE_CMDLINE_SIZE 1024
25
26/* Kprobe early definition from command line */
27static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
b6399cc7 28static bool kprobe_boot_events_enabled __initdata;
970988e1
MH
29
30static int __init set_kprobe_boot_events(char *str)
31{
32 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
33 return 0;
34}
35__setup("kprobe_event=", set_kprobe_boot_events);
e09c8614 36
6212dd29
MH
37static int trace_kprobe_create(int argc, const char **argv);
38static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
39static int trace_kprobe_release(struct dyn_event *ev);
40static bool trace_kprobe_is_busy(struct dyn_event *ev);
41static bool trace_kprobe_match(const char *system, const char *event,
42 struct dyn_event *ev);
43
44static struct dyn_event_operations trace_kprobe_ops = {
45 .create = trace_kprobe_create,
46 .show = trace_kprobe_show,
47 .is_busy = trace_kprobe_is_busy,
48 .free = trace_kprobe_release,
49 .match = trace_kprobe_match,
50};
51
cede666e 52/*
77b44d1b 53 * Kprobe event core functions
413d37d1 54 */
c31ffb3f 55struct trace_kprobe {
6212dd29 56 struct dyn_event devent;
4a846b44 57 struct kretprobe rp; /* Use rp.kp for kprobe use */
a7636d9e 58 unsigned long __percpu *nhit;
413d37d1 59 const char *symbol; /* symbol name */
c31ffb3f 60 struct trace_probe tp;
413d37d1
MH
61};
62
6212dd29
MH
63static bool is_trace_kprobe(struct dyn_event *ev)
64{
65 return ev->ops == &trace_kprobe_ops;
66}
67
68static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
69{
70 return container_of(ev, struct trace_kprobe, devent);
71}
72
73/**
74 * for_each_trace_kprobe - iterate over the trace_kprobe list
75 * @pos: the struct trace_kprobe * for each entry
76 * @dpos: the struct dyn_event * to use as a loop cursor
77 */
78#define for_each_trace_kprobe(pos, dpos) \
79 for_each_dyn_event(dpos) \
80 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
81
c31ffb3f
NK
82#define SIZEOF_TRACE_KPROBE(n) \
83 (offsetof(struct trace_kprobe, tp.args) + \
eca0d916 84 (sizeof(struct probe_arg) * (n)))
a82378d8 85
3da0f180 86static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
413d37d1 87{
c31ffb3f 88 return tk->rp.handler != NULL;
413d37d1
MH
89}
90
3da0f180 91static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
413d37d1 92{
c31ffb3f 93 return tk->symbol ? tk->symbol : "unknown";
413d37d1
MH
94}
95
3da0f180 96static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
61424318 97{
c31ffb3f 98 return tk->rp.kp.offset;
61424318
MH
99}
100
3da0f180 101static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
61424318 102{
c31ffb3f 103 return !!(kprobe_gone(&tk->rp.kp));
61424318
MH
104}
105
3da0f180 106static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
c31ffb3f 107 struct module *mod)
61424318
MH
108{
109 int len = strlen(mod->name);
c31ffb3f 110 const char *name = trace_kprobe_symbol(tk);
61424318
MH
111 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
112}
113
59158ec4 114static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
61424318 115{
59158ec4
MH
116 char *p;
117 bool ret;
118
119 if (!tk->symbol)
120 return false;
121 p = strchr(tk->symbol, ':');
122 if (!p)
123 return true;
124 *p = '\0';
125 mutex_lock(&module_mutex);
126 ret = !!find_module(tk->symbol);
127 mutex_unlock(&module_mutex);
128 *p = ':';
129
130 return ret;
61424318
MH
131}
132
6212dd29
MH
133static bool trace_kprobe_is_busy(struct dyn_event *ev)
134{
135 struct trace_kprobe *tk = to_trace_kprobe(ev);
136
137 return trace_probe_is_enabled(&tk->tp);
138}
139
140static bool trace_kprobe_match(const char *system, const char *event,
141 struct dyn_event *ev)
142{
143 struct trace_kprobe *tk = to_trace_kprobe(ev);
144
145 return strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
146 (!system || strcmp(tk->tp.call.class->system, system) == 0);
147}
148
f18f97ac
MN
149static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
150{
151 unsigned long nhit = 0;
152 int cpu;
153
154 for_each_possible_cpu(cpu)
155 nhit += *per_cpu_ptr(tk->nhit, cpu);
156
157 return nhit;
158}
159
6bc6c77c 160/* Return 0 if it fails to find the symbol address */
45408c4f
MH
161static nokprobe_inline
162unsigned long trace_kprobe_address(struct trace_kprobe *tk)
163{
164 unsigned long addr;
165
166 if (tk->symbol) {
167 addr = (unsigned long)
168 kallsyms_lookup_name(trace_kprobe_symbol(tk));
6bc6c77c
MH
169 if (addr)
170 addr += tk->rp.kp.offset;
45408c4f
MH
171 } else {
172 addr = (unsigned long)tk->rp.kp.addr;
173 }
174 return addr;
175}
176
b4da3340 177bool trace_kprobe_on_func_entry(struct trace_event_call *call)
9802d865
JB
178{
179 struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
b4da3340
MH
180
181 return kprobe_on_func_entry(tk->rp.kp.addr,
182 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
183 tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
9802d865
JB
184}
185
b4da3340 186bool trace_kprobe_error_injectable(struct trace_event_call *call)
9802d865
JB
187{
188 struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
9802d865 189
45408c4f 190 return within_error_injection_list(trace_kprobe_address(tk));
9802d865
JB
191}
192
c31ffb3f
NK
193static int register_kprobe_event(struct trace_kprobe *tk);
194static int unregister_kprobe_event(struct trace_kprobe *tk);
413d37d1 195
50d78056
MH
196static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
197static int kretprobe_dispatcher(struct kretprobe_instance *ri,
198 struct pt_regs *regs);
199
455b2899
MH
200static void free_trace_kprobe(struct trace_kprobe *tk)
201{
202 if (tk) {
203 trace_probe_cleanup(&tk->tp);
204 kfree(tk->symbol);
205 free_percpu(tk->nhit);
206 kfree(tk);
207 }
208}
209
4a846b44
MH
210/*
211 * Allocate new trace_probe and initialize it (including kprobes).
212 */
c31ffb3f 213static struct trace_kprobe *alloc_trace_kprobe(const char *group,
f52487e9 214 const char *event,
4a846b44
MH
215 void *addr,
216 const char *symbol,
217 unsigned long offs,
696ced4f 218 int maxactive,
3a6b7666 219 int nargs, bool is_return)
413d37d1 220{
c31ffb3f 221 struct trace_kprobe *tk;
6f3cf440 222 int ret = -ENOMEM;
413d37d1 223
c31ffb3f
NK
224 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
225 if (!tk)
6f3cf440 226 return ERR_PTR(ret);
413d37d1 227
a7636d9e
MKL
228 tk->nhit = alloc_percpu(unsigned long);
229 if (!tk->nhit)
230 goto error;
231
413d37d1 232 if (symbol) {
c31ffb3f
NK
233 tk->symbol = kstrdup(symbol, GFP_KERNEL);
234 if (!tk->symbol)
413d37d1 235 goto error;
c31ffb3f
NK
236 tk->rp.kp.symbol_name = tk->symbol;
237 tk->rp.kp.offset = offs;
4a846b44 238 } else
c31ffb3f 239 tk->rp.kp.addr = addr;
4a846b44
MH
240
241 if (is_return)
c31ffb3f 242 tk->rp.handler = kretprobe_dispatcher;
4a846b44 243 else
c31ffb3f 244 tk->rp.kp.pre_handler = kprobe_dispatcher;
4a846b44 245
696ced4f
AC
246 tk->rp.maxactive = maxactive;
247
455b2899
MH
248 ret = trace_probe_init(&tk->tp, event, group);
249 if (ret < 0)
f52487e9
MH
250 goto error;
251
6212dd29 252 dyn_event_init(&tk->devent, &trace_kprobe_ops);
c31ffb3f 253 return tk;
413d37d1 254error:
455b2899 255 free_trace_kprobe(tk);
6f3cf440 256 return ERR_PTR(ret);
413d37d1
MH
257}
258
c31ffb3f
NK
259static struct trace_kprobe *find_trace_kprobe(const char *event,
260 const char *group)
413d37d1 261{
6212dd29 262 struct dyn_event *pos;
c31ffb3f 263 struct trace_kprobe *tk;
413d37d1 264
6212dd29 265 for_each_trace_kprobe(tk, pos)
687fcc4a 266 if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
c31ffb3f
NK
267 strcmp(tk->tp.call.class->system, group) == 0)
268 return tk;
413d37d1
MH
269 return NULL;
270}
271
87107a25
SRV
272static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
273{
274 int ret = 0;
275
276 if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
277 if (trace_kprobe_is_return(tk))
278 ret = enable_kretprobe(&tk->rp);
279 else
280 ret = enable_kprobe(&tk->rp.kp);
281 }
282
283 return ret;
284}
285
41a7dd42
MH
286/*
287 * Enable trace_probe
288 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
289 */
290static int
7f1d2f82 291enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
1538f888 292{
87107a25 293 struct event_file_link *link;
1538f888
MH
294 int ret = 0;
295
41a7dd42 296 if (file) {
b04d52e3
ON
297 link = kmalloc(sizeof(*link), GFP_KERNEL);
298 if (!link) {
41a7dd42 299 ret = -ENOMEM;
3fe3d619 300 goto out;
41a7dd42 301 }
41a7dd42 302
b04d52e3 303 link->file = file;
c31ffb3f 304 list_add_tail_rcu(&link->list, &tk->tp.files);
41a7dd42 305
c31ffb3f 306 tk->tp.flags |= TP_FLAG_TRACE;
87107a25
SRV
307 ret = __enable_trace_kprobe(tk);
308 if (ret) {
309 list_del_rcu(&link->list);
57ea2a34
AS
310 kfree(link);
311 tk->tp.flags &= ~TP_FLAG_TRACE;
57ea2a34 312 }
87107a25
SRV
313
314 } else {
315 tk->tp.flags |= TP_FLAG_PROFILE;
316 ret = __enable_trace_kprobe(tk);
317 if (ret)
318 tk->tp.flags &= ~TP_FLAG_PROFILE;
57ea2a34 319 }
3fe3d619 320 out:
1538f888
MH
321 return ret;
322}
323
41a7dd42
MH
324/*
325 * Disable trace_probe
326 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
327 */
328static int
7f1d2f82 329disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
1538f888 330{
a232e270
MH
331 struct event_file_link *link = NULL;
332 int wait = 0;
41a7dd42
MH
333 int ret = 0;
334
41a7dd42 335 if (file) {
c31ffb3f 336 link = find_event_file_link(&tk->tp, file);
b04d52e3 337 if (!link) {
41a7dd42 338 ret = -EINVAL;
3fe3d619 339 goto out;
41a7dd42
MH
340 }
341
b04d52e3 342 list_del_rcu(&link->list);
a232e270 343 wait = 1;
c31ffb3f 344 if (!list_empty(&tk->tp.files))
b04d52e3 345 goto out;
41a7dd42 346
c31ffb3f 347 tk->tp.flags &= ~TP_FLAG_TRACE;
41a7dd42 348 } else
c31ffb3f 349 tk->tp.flags &= ~TP_FLAG_PROFILE;
41a7dd42 350
c31ffb3f
NK
351 if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
352 if (trace_kprobe_is_return(tk))
353 disable_kretprobe(&tk->rp);
1538f888 354 else
c31ffb3f 355 disable_kprobe(&tk->rp.kp);
a232e270 356 wait = 1;
1538f888 357 }
e12f03d7
SL
358
359 /*
360 * if tk is not added to any list, it must be a local trace_kprobe
361 * created with perf_event_open. We don't need to wait for these
362 * trace_kprobes
363 */
6212dd29 364 if (list_empty(&tk->devent.list))
e12f03d7 365 wait = 0;
3fe3d619 366 out:
a232e270
MH
367 if (wait) {
368 /*
369 * Synchronize with kprobe_trace_func/kretprobe_trace_func
370 * to ensure disabled (all running handlers are finished).
371 * This is not only for kfree(), but also the caller,
372 * trace_remove_event_call() supposes it for releasing
373 * event_call related objects, which will be accessed in
374 * the kprobe_trace_func/kretprobe_trace_func.
375 */
74401729 376 synchronize_rcu();
a232e270
MH
377 kfree(link); /* Ignored if link == NULL */
378 }
379
41a7dd42 380 return ret;
1538f888
MH
381}
382
45408c4f
MH
#if defined(CONFIG_KPROBES_ON_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
/*
 * True when the probed address falls inside a function that ftrace does
 * not trace (no mcount/fentry site), which kprobes-on-ftrace can't handle.
 */
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk)	(false)
#endif
405
61424318 406/* Internal register function - just handle k*probes and flags */
c31ffb3f 407static int __register_trace_kprobe(struct trace_kprobe *tk)
413d37d1 408{
a6682814 409 int i, ret;
61424318 410
c31ffb3f 411 if (trace_probe_is_registered(&tk->tp))
61424318
MH
412 return -EINVAL;
413
45408c4f
MH
414 if (within_notrace_func(tk)) {
415 pr_warn("Could not probe notrace function %s\n",
416 trace_kprobe_symbol(tk));
417 return -EINVAL;
418 }
419
a6682814
MH
420 for (i = 0; i < tk->tp.nr_args; i++) {
421 ret = traceprobe_update_arg(&tk->tp.args[i]);
422 if (ret)
423 return ret;
424 }
425
61424318 426 /* Set/clear disabled flag according to tp->flag */
c31ffb3f
NK
427 if (trace_probe_is_enabled(&tk->tp))
428 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
61424318 429 else
c31ffb3f 430 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
61424318 431
c31ffb3f
NK
432 if (trace_kprobe_is_return(tk))
433 ret = register_kretprobe(&tk->rp);
413d37d1 434 else
c31ffb3f 435 ret = register_kprobe(&tk->rp.kp);
61424318 436
ab105a4f 437 if (ret == 0)
c31ffb3f 438 tk->tp.flags |= TP_FLAG_REGISTERED;
61424318
MH
439 return ret;
440}
441
442/* Internal unregister function - just handle k*probes and flags */
c31ffb3f 443static void __unregister_trace_kprobe(struct trace_kprobe *tk)
61424318 444{
c31ffb3f
NK
445 if (trace_probe_is_registered(&tk->tp)) {
446 if (trace_kprobe_is_return(tk))
447 unregister_kretprobe(&tk->rp);
61424318 448 else
c31ffb3f
NK
449 unregister_kprobe(&tk->rp.kp);
450 tk->tp.flags &= ~TP_FLAG_REGISTERED;
61424318 451 /* Cleanup kprobe for reuse */
c31ffb3f
NK
452 if (tk->rp.kp.symbol_name)
453 tk->rp.kp.addr = NULL;
61424318
MH
454 }
455}
456
6212dd29 457/* Unregister a trace_probe and probe_event */
c31ffb3f 458static int unregister_trace_kprobe(struct trace_kprobe *tk)
61424318 459{
02ca1521 460 /* Enabled event can not be unregistered */
c31ffb3f 461 if (trace_probe_is_enabled(&tk->tp))
02ca1521
MH
462 return -EBUSY;
463
40c32592 464 /* Will fail if probe is being used by ftrace or perf */
c31ffb3f 465 if (unregister_kprobe_event(tk))
40c32592
SRRH
466 return -EBUSY;
467
c31ffb3f 468 __unregister_trace_kprobe(tk);
6212dd29 469 dyn_event_remove(&tk->devent);
02ca1521
MH
470
471 return 0;
413d37d1
MH
472}
473
474/* Register a trace_probe and probe_event */
c31ffb3f 475static int register_trace_kprobe(struct trace_kprobe *tk)
413d37d1 476{
c31ffb3f 477 struct trace_kprobe *old_tk;
413d37d1
MH
478 int ret;
479
6212dd29 480 mutex_lock(&event_mutex);
413d37d1 481
61424318 482 /* Delete old (same name) event if exist */
687fcc4a 483 old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
de7b2973 484 tk->tp.call.class->system);
c31ffb3f
NK
485 if (old_tk) {
486 ret = unregister_trace_kprobe(old_tk);
02ca1521
MH
487 if (ret < 0)
488 goto end;
c31ffb3f 489 free_trace_kprobe(old_tk);
2d5e067e 490 }
61424318
MH
491
492 /* Register new event */
c31ffb3f 493 ret = register_kprobe_event(tk);
2d5e067e 494 if (ret) {
a395d6a7 495 pr_warn("Failed to register probe event(%d)\n", ret);
2d5e067e
MH
496 goto end;
497 }
498
61424318 499 /* Register k*probe */
c31ffb3f 500 ret = __register_trace_kprobe(tk);
59158ec4
MH
501 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
502 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
503 ret = 0;
504 }
505
61424318 506 if (ret < 0)
c31ffb3f 507 unregister_kprobe_event(tk);
61424318 508 else
6212dd29 509 dyn_event_add(&tk->devent);
61424318 510
413d37d1 511end:
6212dd29 512 mutex_unlock(&event_mutex);
413d37d1
MH
513 return ret;
514}
515
61424318 516/* Module notifier call back, checking event on the module */
c31ffb3f 517static int trace_kprobe_module_callback(struct notifier_block *nb,
61424318
MH
518 unsigned long val, void *data)
519{
520 struct module *mod = data;
6212dd29 521 struct dyn_event *pos;
c31ffb3f 522 struct trace_kprobe *tk;
61424318
MH
523 int ret;
524
525 if (val != MODULE_STATE_COMING)
526 return NOTIFY_DONE;
527
528 /* Update probes on coming module */
6212dd29
MH
529 mutex_lock(&event_mutex);
530 for_each_trace_kprobe(tk, pos) {
c31ffb3f 531 if (trace_kprobe_within_module(tk, mod)) {
02ca1521 532 /* Don't need to check busy - this should have gone. */
c31ffb3f
NK
533 __unregister_trace_kprobe(tk);
534 ret = __register_trace_kprobe(tk);
61424318 535 if (ret)
a395d6a7
JP
536 pr_warn("Failed to re-register probe %s on %s: %d\n",
537 trace_event_name(&tk->tp.call),
538 mod->name, ret);
61424318
MH
539 }
540 }
6212dd29 541 mutex_unlock(&event_mutex);
61424318
MH
542
543 return NOTIFY_DONE;
544}
545
c31ffb3f
NK
546static struct notifier_block trace_kprobe_module_nb = {
547 .notifier_call = trace_kprobe_module_callback,
61424318
MH
548 .priority = 1 /* Invoked after kprobe module callback */
549};
550
fca18a47
NR
551/* Convert certain expected symbols into '_' when generating event names */
552static inline void sanitize_event_name(char *name)
553{
554 while (*name++ != '\0')
555 if (*name == ':' || *name == '.')
556 *name = '_';
557}
558
6212dd29 559static int trace_kprobe_create(int argc, const char *argv[])
413d37d1
MH
560{
561 /*
562 * Argument syntax:
696ced4f
AC
563 * - Add kprobe:
564 * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
565 * - Add kretprobe:
566 * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
413d37d1 567 * Fetch args:
2e06ff63
MH
568 * $retval : fetch return value
569 * $stack : fetch stack address
570 * $stackN : fetch Nth of stack (N:0-)
35abb67d 571 * $comm : fetch current task comm
413d37d1
MH
572 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
573 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
574 * %REG : fetch register REG
93ccae7a 575 * Dereferencing memory fetch:
413d37d1 576 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
eca0d916
MH
577 * Alias name of args:
578 * NAME=FETCHARG : set NAME as alias of FETCHARG.
93ccae7a
MH
579 * Type of args:
580 * FETCHARG:TYPE : use TYPE instead of unsigned long.
413d37d1 581 */
ab105a4f 582 struct trace_kprobe *tk = NULL;
6212dd29
MH
583 int i, len, ret = 0;
584 bool is_return = false;
585 char *symbol = NULL, *tmp = NULL;
586 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
696ced4f 587 int maxactive = 0;
c5d343b6 588 long offset = 0;
413d37d1 589 void *addr = NULL;
4a846b44 590 char buf[MAX_EVENT_NAME_LEN];
a1303af5 591 unsigned int flags = TPARG_FL_KERNEL;
413d37d1 592
8b05a3a7
AR
593 switch (argv[0][0]) {
594 case 'r':
3a6b7666 595 is_return = true;
a1303af5 596 flags |= TPARG_FL_RETURN;
8b05a3a7
AR
597 break;
598 case 'p':
599 break;
600 default:
601 return -ECANCELED;
602 }
603 if (argc < 2)
6212dd29 604 return -ECANCELED;
413d37d1 605
ab105a4f
MH
606 trace_probe_log_init("trace_kprobe", argc, argv);
607
696ced4f 608 event = strchr(&argv[0][1], ':');
6212dd29 609 if (event)
696ced4f 610 event++;
6212dd29 611
287c038c
MH
612 if (isdigit(argv[0][1])) {
613 if (!is_return) {
ab105a4f
MH
614 trace_probe_log_err(1, MAXACT_NO_KPROBE);
615 goto parse_error;
287c038c 616 }
6212dd29
MH
617 if (event)
618 len = event - &argv[0][1] - 1;
619 else
620 len = strlen(&argv[0][1]);
ab105a4f
MH
621 if (len > MAX_EVENT_NAME_LEN - 1) {
622 trace_probe_log_err(1, BAD_MAXACT);
623 goto parse_error;
624 }
6212dd29
MH
625 memcpy(buf, &argv[0][1], len);
626 buf[len] = '\0';
627 ret = kstrtouint(buf, 0, &maxactive);
287c038c 628 if (ret || !maxactive) {
ab105a4f
MH
629 trace_probe_log_err(1, BAD_MAXACT);
630 goto parse_error;
696ced4f
AC
631 }
632 /* kretprobes instances are iterated over via a list. The
633 * maximum should stay reasonable.
634 */
635 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
ab105a4f
MH
636 trace_probe_log_err(1, MAXACT_TOO_BIG);
637 goto parse_error;
696ced4f
AC
638 }
639 }
640
9e52b325
SD
641 /* try to parse an address. if that fails, try to read the
642 * input as a symbol. */
643 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
ab105a4f 644 trace_probe_log_set_index(1);
6212dd29 645 /* Check whether uprobe event specified */
ab105a4f
MH
646 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
647 ret = -ECANCELED;
648 goto error;
649 }
413d37d1 650 /* a symbol specified */
6212dd29
MH
651 symbol = kstrdup(argv[1], GFP_KERNEL);
652 if (!symbol)
653 return -ENOMEM;
413d37d1 654 /* TODO: support .init module functions */
8ab83f56 655 ret = traceprobe_split_symbol_offset(symbol, &offset);
c5d343b6 656 if (ret || offset < 0 || offset > UINT_MAX) {
ab105a4f
MH
657 trace_probe_log_err(0, BAD_PROBE_ADDR);
658 goto parse_error;
e63cc239 659 }
a1303af5
MH
660 if (kprobe_on_func_entry(NULL, symbol, offset))
661 flags |= TPARG_FL_FENTRY;
662 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
ab105a4f
MH
663 trace_probe_log_err(0, BAD_RETPROBE);
664 goto parse_error;
e63cc239 665 }
413d37d1
MH
666 }
667
ab105a4f 668 trace_probe_log_set_index(0);
6212dd29 669 if (event) {
ab105a4f
MH
670 ret = traceprobe_parse_event_name(&event, &group, buf,
671 event - argv[0]);
6212dd29 672 if (ret)
ab105a4f 673 goto parse_error;
6212dd29 674 } else {
4263565d 675 /* Make a new event name */
4263565d 676 if (symbol)
6f3cf440 677 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
4263565d
MH
678 is_return ? 'r' : 'p', symbol, offset);
679 else
6f3cf440 680 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
4263565d 681 is_return ? 'r' : 'p', addr);
fca18a47 682 sanitize_event_name(buf);
4a846b44
MH
683 event = buf;
684 }
6212dd29
MH
685
686 /* setup a probe */
696ced4f 687 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
ab105a4f 688 argc - 2, is_return);
c31ffb3f 689 if (IS_ERR(tk)) {
6212dd29 690 ret = PTR_ERR(tk);
ab105a4f 691 /* This must return -ENOMEM, else there is a bug */
a039480e 692 WARN_ON_ONCE(ret != -ENOMEM);
ab105a4f 693 goto out; /* We know tk is not allocated */
e63cc239 694 }
ab105a4f 695 argc -= 2; argv += 2;
413d37d1 696
413d37d1 697 /* parse arguments */
a82378d8 698 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
6212dd29
MH
699 tmp = kstrdup(argv[i], GFP_KERNEL);
700 if (!tmp) {
ba8665d7 701 ret = -ENOMEM;
413d37d1
MH
702 goto error;
703 }
da34634f 704
ab105a4f 705 trace_probe_log_set_index(i + 2);
6212dd29
MH
706 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
707 kfree(tmp);
d00bbea9 708 if (ret)
ab105a4f 709 goto error; /* This can be -ENOMEM */
413d37d1 710 }
413d37d1 711
f730e0f2
MH
712 ret = traceprobe_set_print_fmt(&tk->tp, is_return);
713 if (ret < 0)
714 goto error;
715
c31ffb3f 716 ret = register_trace_kprobe(tk);
ab105a4f
MH
717 if (ret) {
718 trace_probe_log_set_index(1);
719 if (ret == -EILSEQ)
720 trace_probe_log_err(0, BAD_INSN_BNDRY);
721 else if (ret == -ENOENT)
722 trace_probe_log_err(0, BAD_PROBE_ADDR);
723 else if (ret != -ENOMEM)
724 trace_probe_log_err(0, FAIL_REG_PROBE);
413d37d1 725 goto error;
ab105a4f
MH
726 }
727
6212dd29 728out:
ab105a4f 729 trace_probe_log_clear();
6212dd29
MH
730 kfree(symbol);
731 return ret;
413d37d1 732
ab105a4f
MH
733parse_error:
734 ret = -EINVAL;
413d37d1 735error:
c31ffb3f 736 free_trace_kprobe(tk);
6212dd29 737 goto out;
413d37d1
MH
738}
739
6212dd29 740static int create_or_delete_trace_kprobe(int argc, char **argv)
413d37d1 741{
6212dd29 742 int ret;
02ca1521 743
6212dd29
MH
744 if (argv[0][0] == '-')
745 return dyn_event_release(argc, argv, &trace_kprobe_ops);
413d37d1 746
6212dd29
MH
747 ret = trace_kprobe_create(argc, (const char **)argv);
748 return ret == -ECANCELED ? -EINVAL : ret;
413d37d1
MH
749}
750
6212dd29 751static int trace_kprobe_release(struct dyn_event *ev)
413d37d1 752{
6212dd29
MH
753 struct trace_kprobe *tk = to_trace_kprobe(ev);
754 int ret = unregister_trace_kprobe(tk);
413d37d1 755
6212dd29
MH
756 if (!ret)
757 free_trace_kprobe(tk);
758 return ret;
413d37d1
MH
759}
760
6212dd29 761static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
413d37d1 762{
6212dd29 763 struct trace_kprobe *tk = to_trace_kprobe(ev);
93ccae7a 764 int i;
413d37d1 765
fa6f0cc7 766 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
de7b2973 767 seq_printf(m, ":%s/%s", tk->tp.call.class->system,
687fcc4a 768 trace_event_name(&tk->tp.call));
413d37d1 769
c31ffb3f
NK
770 if (!tk->symbol)
771 seq_printf(m, " 0x%p", tk->rp.kp.addr);
772 else if (tk->rp.kp.offset)
773 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
774 tk->rp.kp.offset);
413d37d1 775 else
c31ffb3f 776 seq_printf(m, " %s", trace_kprobe_symbol(tk));
413d37d1 777
c31ffb3f
NK
778 for (i = 0; i < tk->tp.nr_args; i++)
779 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
fa6f0cc7 780 seq_putc(m, '\n');
93ccae7a 781
413d37d1
MH
782 return 0;
783}
784
6212dd29
MH
785static int probes_seq_show(struct seq_file *m, void *v)
786{
787 struct dyn_event *ev = v;
788
789 if (!is_trace_kprobe(ev))
790 return 0;
791
792 return trace_kprobe_show(m, ev);
793}
794
413d37d1 795static const struct seq_operations probes_seq_op = {
6212dd29
MH
796 .start = dyn_event_seq_start,
797 .next = dyn_event_seq_next,
798 .stop = dyn_event_seq_stop,
413d37d1
MH
799 .show = probes_seq_show
800};
801
802static int probes_open(struct inode *inode, struct file *file)
803{
02ca1521
MH
804 int ret;
805
806 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
6212dd29 807 ret = dyn_events_release_all(&trace_kprobe_ops);
02ca1521
MH
808 if (ret < 0)
809 return ret;
810 }
413d37d1
MH
811
812 return seq_open(file, &probes_seq_op);
813}
814
413d37d1
MH
815static ssize_t probes_write(struct file *file, const char __user *buffer,
816 size_t count, loff_t *ppos)
817{
7e465baa 818 return trace_parse_run_command(file, buffer, count, ppos,
6212dd29 819 create_or_delete_trace_kprobe);
413d37d1
MH
820}
821
822static const struct file_operations kprobe_events_ops = {
823 .owner = THIS_MODULE,
824 .open = probes_open,
825 .read = seq_read,
826 .llseek = seq_lseek,
827 .release = seq_release,
828 .write = probes_write,
829};
830
cd7e7bd5
MH
831/* Probes profiling interfaces */
832static int probes_profile_seq_show(struct seq_file *m, void *v)
833{
6212dd29
MH
834 struct dyn_event *ev = v;
835 struct trace_kprobe *tk;
cd7e7bd5 836
6212dd29
MH
837 if (!is_trace_kprobe(ev))
838 return 0;
cd7e7bd5 839
6212dd29 840 tk = to_trace_kprobe(ev);
de7b2973 841 seq_printf(m, " %-44s %15lu %15lu\n",
f18f97ac
MN
842 trace_event_name(&tk->tp.call),
843 trace_kprobe_nhit(tk),
c31ffb3f 844 tk->rp.kp.nmissed);
cd7e7bd5
MH
845
846 return 0;
847}
848
849static const struct seq_operations profile_seq_op = {
6212dd29
MH
850 .start = dyn_event_seq_start,
851 .next = dyn_event_seq_next,
852 .stop = dyn_event_seq_stop,
cd7e7bd5
MH
853 .show = probes_profile_seq_show
854};
855
856static int profile_open(struct inode *inode, struct file *file)
857{
858 return seq_open(file, &profile_seq_op);
859}
860
861static const struct file_operations kprobe_profile_ops = {
862 .owner = THIS_MODULE,
863 .open = profile_open,
864 .read = seq_read,
865 .llseek = seq_lseek,
866 .release = seq_release,
867};
868
53305928
MH
869/* Kprobe specific fetch functions */
870
871/* Return the length of string -- including null terminal byte */
9178412d
MH
872static nokprobe_inline int
873fetch_store_strlen(unsigned long addr)
53305928 874{
53305928
MH
875 int ret, len = 0;
876 u8 c;
877
53305928 878 do {
49ef5f45 879 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
53305928
MH
880 len++;
881 } while (c && ret == 0 && len < MAX_STRING_SIZE);
882
9178412d 883 return (ret < 0) ? ret : len;
53305928
MH
884}
885
88903c46
MH
886/* Return the length of string -- including null terminal byte */
887static nokprobe_inline int
888fetch_store_strlen_user(unsigned long addr)
889{
890 const void __user *uaddr = (__force const void __user *)addr;
891
892 return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
893}
894
53305928
MH
895/*
896 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
897 * length and relative data location.
898 */
9178412d
MH
899static nokprobe_inline int
900fetch_store_string(unsigned long addr, void *dest, void *base)
53305928 901{
9178412d 902 int maxlen = get_loc_len(*(u32 *)dest);
88903c46 903 void *__dest;
53305928
MH
904 long ret;
905
9178412d
MH
906 if (unlikely(!maxlen))
907 return -ENOMEM;
88903c46
MH
908
909 __dest = get_loc_data(dest, base);
910
53305928
MH
911 /*
912 * Try to get string again, since the string can be changed while
913 * probing.
914 */
88903c46
MH
915 ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
916 if (ret >= 0)
917 *(u32 *)dest = make_data_loc(ret, __dest - base);
918
919 return ret;
920}
53305928 921
88903c46
MH
922/*
923 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
924 * with max length and relative data location.
925 */
926static nokprobe_inline int
927fetch_store_string_user(unsigned long addr, void *dest, void *base)
928{
929 const void __user *uaddr = (__force const void __user *)addr;
930 int maxlen = get_loc_len(*(u32 *)dest);
931 void *__dest;
932 long ret;
933
934 if (unlikely(!maxlen))
935 return -ENOMEM;
936
937 __dest = get_loc_data(dest, base);
938
939 ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
9178412d 940 if (ret >= 0)
88903c46
MH
941 *(u32 *)dest = make_data_loc(ret, __dest - base);
942
9178412d 943 return ret;
53305928
MH
944}
945
9b960a38
MH
946static nokprobe_inline int
947probe_mem_read(void *dest, void *src, size_t size)
948{
949 return probe_kernel_read(dest, src, size);
950}
951
e65f7ae7
MH
952static nokprobe_inline int
953probe_mem_read_user(void *dest, void *src, size_t size)
954{
539b75b2
MH
955 const void __user *uaddr = (__force const void __user *)src;
956
957 return probe_user_read(dest, uaddr, size);
e65f7ae7
MH
958}
959
53305928
MH
960/* Note that we don't verify it, since the code does not come from user space */
961static int
962process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
9178412d 963 void *base)
53305928
MH
964{
965 unsigned long val;
53305928 966
a6682814 967retry:
53305928
MH
968 /* 1st stage: get value from context */
969 switch (code->op) {
970 case FETCH_OP_REG:
971 val = regs_get_register(regs, code->param);
972 break;
973 case FETCH_OP_STACK:
974 val = regs_get_kernel_stack_nth(regs, code->param);
975 break;
976 case FETCH_OP_STACKP:
977 val = kernel_stack_pointer(regs);
978 break;
979 case FETCH_OP_RETVAL:
980 val = regs_return_value(regs);
981 break;
982 case FETCH_OP_IMM:
983 val = code->immediate;
984 break;
985 case FETCH_OP_COMM:
986 val = (unsigned long)current->comm;
987 break;
a1303af5
MH
988#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
989 case FETCH_OP_ARG:
990 val = regs_get_kernel_argument(regs, code->param);
991 break;
992#endif
a6682814
MH
993 case FETCH_NOP_SYMBOL: /* Ignore a place holder */
994 code++;
995 goto retry;
53305928
MH
996 default:
997 return -EILSEQ;
998 }
999 code++;
1000
9b960a38 1001 return process_fetch_insn_bottom(code, val, dest, base);
53305928
MH
1002}
1003NOKPROBE_SYMBOL(process_fetch_insn)
1004
413d37d1 1005/* Kprobe handler */
3da0f180 1006static nokprobe_inline void
c31ffb3f 1007__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
7f1d2f82 1008 struct trace_event_file *trace_file)
413d37d1 1009{
93ccae7a 1010 struct kprobe_trace_entry_head *entry;
413d37d1 1011 struct ring_buffer_event *event;
8f8ffe24 1012 struct ring_buffer *buffer;
e09c8614 1013 int size, dsize, pc;
413d37d1 1014 unsigned long irq_flags;
2425bcb9 1015 struct trace_event_call *call = &tk->tp.call;
413d37d1 1016
7f1d2f82 1017 WARN_ON(call != trace_file->event_call);
41a7dd42 1018
09a5059a 1019 if (trace_trigger_soft_disabled(trace_file))
13a1e4ae 1020 return;
b8820084 1021
413d37d1
MH
1022 local_save_flags(irq_flags);
1023 pc = preempt_count();
1024
c31ffb3f
NK
1025 dsize = __get_data_size(&tk->tp, regs);
1026 size = sizeof(*entry) + tk->tp.size + dsize;
413d37d1 1027
7f1d2f82 1028 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
41a7dd42
MH
1029 call->event.type,
1030 size, irq_flags, pc);
413d37d1 1031 if (!event)
1e12a4a7 1032 return;
413d37d1
MH
1033
1034 entry = ring_buffer_event_data(event);
c31ffb3f 1035 entry->ip = (unsigned long)tk->rp.kp.addr;
9178412d 1036 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
413d37d1 1037
7f1d2f82 1038 event_trigger_unlock_commit_regs(trace_file, buffer, event,
13a1e4ae 1039 entry, irq_flags, pc, regs);
413d37d1
MH
1040}
1041
3da0f180 1042static void
c31ffb3f 1043kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
41a7dd42 1044{
b04d52e3 1045 struct event_file_link *link;
41a7dd42 1046
c31ffb3f
NK
1047 list_for_each_entry_rcu(link, &tk->tp.files, list)
1048 __kprobe_trace_func(tk, regs, link->file);
41a7dd42 1049}
3da0f180 1050NOKPROBE_SYMBOL(kprobe_trace_func);
41a7dd42 1051
413d37d1 1052/* Kretprobe handler */
3da0f180 1053static nokprobe_inline void
c31ffb3f 1054__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
41a7dd42 1055 struct pt_regs *regs,
7f1d2f82 1056 struct trace_event_file *trace_file)
413d37d1 1057{
93ccae7a 1058 struct kretprobe_trace_entry_head *entry;
413d37d1 1059 struct ring_buffer_event *event;
8f8ffe24 1060 struct ring_buffer *buffer;
e09c8614 1061 int size, pc, dsize;
413d37d1 1062 unsigned long irq_flags;
2425bcb9 1063 struct trace_event_call *call = &tk->tp.call;
413d37d1 1064
7f1d2f82 1065 WARN_ON(call != trace_file->event_call);
41a7dd42 1066
09a5059a 1067 if (trace_trigger_soft_disabled(trace_file))
13a1e4ae 1068 return;
b8820084 1069
413d37d1
MH
1070 local_save_flags(irq_flags);
1071 pc = preempt_count();
1072
c31ffb3f
NK
1073 dsize = __get_data_size(&tk->tp, regs);
1074 size = sizeof(*entry) + tk->tp.size + dsize;
413d37d1 1075
7f1d2f82 1076 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
41a7dd42
MH
1077 call->event.type,
1078 size, irq_flags, pc);
413d37d1 1079 if (!event)
1e12a4a7 1080 return;
413d37d1
MH
1081
1082 entry = ring_buffer_event_data(event);
c31ffb3f 1083 entry->func = (unsigned long)tk->rp.kp.addr;
413d37d1 1084 entry->ret_ip = (unsigned long)ri->ret_addr;
9178412d 1085 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
413d37d1 1086
7f1d2f82 1087 event_trigger_unlock_commit_regs(trace_file, buffer, event,
13a1e4ae 1088 entry, irq_flags, pc, regs);
413d37d1
MH
1089}
1090
3da0f180 1091static void
c31ffb3f 1092kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
41a7dd42
MH
1093 struct pt_regs *regs)
1094{
b04d52e3 1095 struct event_file_link *link;
41a7dd42 1096
c31ffb3f
NK
1097 list_for_each_entry_rcu(link, &tk->tp.files, list)
1098 __kretprobe_trace_func(tk, ri, regs, link->file);
41a7dd42 1099}
3da0f180 1100NOKPROBE_SYMBOL(kretprobe_trace_func);
41a7dd42 1101
413d37d1 1102/* Event entry printers */
b62fdd97 1103static enum print_line_t
a9a57763
SR
1104print_kprobe_event(struct trace_iterator *iter, int flags,
1105 struct trace_event *event)
413d37d1 1106{
93ccae7a 1107 struct kprobe_trace_entry_head *field;
413d37d1 1108 struct trace_seq *s = &iter->seq;
eca0d916 1109 struct trace_probe *tp;
413d37d1 1110
93ccae7a 1111 field = (struct kprobe_trace_entry_head *)iter->ent;
80decc70 1112 tp = container_of(event, struct trace_probe, call.event);
413d37d1 1113
687fcc4a 1114 trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
6e9f23d1 1115
413d37d1 1116 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
85224da0 1117 goto out;
413d37d1 1118
85224da0 1119 trace_seq_putc(s, ')');
413d37d1 1120
56de7630
MH
1121 if (print_probe_args(s, tp->args, tp->nr_args,
1122 (u8 *)&field[1], field) < 0)
1123 goto out;
413d37d1 1124
85224da0
SRRH
1125 trace_seq_putc(s, '\n');
1126 out:
1127 return trace_handle_return(s);
413d37d1
MH
1128}
1129
b62fdd97 1130static enum print_line_t
a9a57763
SR
1131print_kretprobe_event(struct trace_iterator *iter, int flags,
1132 struct trace_event *event)
413d37d1 1133{
93ccae7a 1134 struct kretprobe_trace_entry_head *field;
413d37d1 1135 struct trace_seq *s = &iter->seq;
eca0d916 1136 struct trace_probe *tp;
413d37d1 1137
93ccae7a 1138 field = (struct kretprobe_trace_entry_head *)iter->ent;
80decc70 1139 tp = container_of(event, struct trace_probe, call.event);
413d37d1 1140
687fcc4a 1141 trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
6e9f23d1 1142
413d37d1 1143 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
85224da0 1144 goto out;
413d37d1 1145
85224da0 1146 trace_seq_puts(s, " <- ");
413d37d1
MH
1147
1148 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
85224da0 1149 goto out;
413d37d1 1150
85224da0 1151 trace_seq_putc(s, ')');
413d37d1 1152
56de7630
MH
1153 if (print_probe_args(s, tp->args, tp->nr_args,
1154 (u8 *)&field[1], field) < 0)
1155 goto out;
413d37d1 1156
85224da0 1157 trace_seq_putc(s, '\n');
413d37d1 1158
85224da0
SRRH
1159 out:
1160 return trace_handle_return(s);
413d37d1
MH
1161}
1162
413d37d1 1163
2425bcb9 1164static int kprobe_event_define_fields(struct trace_event_call *event_call)
413d37d1 1165{
eeb07b06 1166 int ret;
93ccae7a 1167 struct kprobe_trace_entry_head field;
c31ffb3f 1168 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
413d37d1 1169
a703d946 1170 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
c31ffb3f 1171
eeb07b06 1172 return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
413d37d1
MH
1173}
1174
2425bcb9 1175static int kretprobe_event_define_fields(struct trace_event_call *event_call)
413d37d1 1176{
eeb07b06 1177 int ret;
93ccae7a 1178 struct kretprobe_trace_entry_head field;
c31ffb3f 1179 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
413d37d1 1180
a703d946
MH
1181 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1182 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
c31ffb3f 1183
eeb07b06 1184 return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
413d37d1
MH
1185}
1186
07b139c8 1187#ifdef CONFIG_PERF_EVENTS
e08d1c65
MH
1188
1189/* Kprobe profile handler */
9802d865 1190static int
c31ffb3f 1191kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
e08d1c65 1192{
2425bcb9 1193 struct trace_event_call *call = &tk->tp.call;
93ccae7a 1194 struct kprobe_trace_entry_head *entry;
1c024eca 1195 struct hlist_head *head;
e09c8614 1196 int size, __size, dsize;
4ed7c92d 1197 int rctx;
e08d1c65 1198
9802d865 1199 if (bpf_prog_array_valid(call)) {
66665ad2 1200 unsigned long orig_ip = instruction_pointer(regs);
9802d865
JB
1201 int ret;
1202
1203 ret = trace_call_bpf(call, regs);
1204
1205 /*
1206 * We need to check and see if we modified the pc of the
cce188bd
MH
1207 * pt_regs, and if so return 1 so that we don't do the
1208 * single stepping.
9802d865 1209 */
cce188bd 1210 if (orig_ip != instruction_pointer(regs))
9802d865 1211 return 1;
9802d865
JB
1212 if (!ret)
1213 return 0;
1214 }
2541517c 1215
288e984e
ON
1216 head = this_cpu_ptr(call->perf_events);
1217 if (hlist_empty(head))
9802d865 1218 return 0;
288e984e 1219
c31ffb3f
NK
1220 dsize = __get_data_size(&tk->tp, regs);
1221 __size = sizeof(*entry) + tk->tp.size + dsize;
74ebb63e
MH
1222 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1223 size -= sizeof(u32);
ce71b9df 1224
1e1dcd93 1225 entry = perf_trace_buf_alloc(size, NULL, &rctx);
430ad5a6 1226 if (!entry)
9802d865 1227 return 0;
a1a138d0 1228
c31ffb3f 1229 entry->ip = (unsigned long)tk->rp.kp.addr;
e09c8614 1230 memset(&entry[1], 0, dsize);
9178412d 1231 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1e1dcd93 1232 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
8fd0fbbe 1233 head, NULL);
9802d865 1234 return 0;
e08d1c65 1235}
3da0f180 1236NOKPROBE_SYMBOL(kprobe_perf_func);
e08d1c65
MH
1237
1238/* Kretprobe profile handler */
3da0f180 1239static void
c31ffb3f 1240kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
2b106aab 1241 struct pt_regs *regs)
e08d1c65 1242{
2425bcb9 1243 struct trace_event_call *call = &tk->tp.call;
93ccae7a 1244 struct kretprobe_trace_entry_head *entry;
1c024eca 1245 struct hlist_head *head;
e09c8614 1246 int size, __size, dsize;
4ed7c92d 1247 int rctx;
e08d1c65 1248
e87c6bc3 1249 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
2541517c
AS
1250 return;
1251
288e984e
ON
1252 head = this_cpu_ptr(call->perf_events);
1253 if (hlist_empty(head))
1254 return;
1255
c31ffb3f
NK
1256 dsize = __get_data_size(&tk->tp, regs);
1257 __size = sizeof(*entry) + tk->tp.size + dsize;
74ebb63e
MH
1258 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1259 size -= sizeof(u32);
444a2a3b 1260
1e1dcd93 1261 entry = perf_trace_buf_alloc(size, NULL, &rctx);
430ad5a6 1262 if (!entry)
1e12a4a7 1263 return;
e08d1c65 1264
c31ffb3f 1265 entry->func = (unsigned long)tk->rp.kp.addr;
a1a138d0 1266 entry->ret_ip = (unsigned long)ri->ret_addr;
9178412d 1267 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1e1dcd93 1268 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
8fd0fbbe 1269 head, NULL);
e08d1c65 1270}
3da0f180 1271NOKPROBE_SYMBOL(kretprobe_perf_func);
41bdc4b4
YS
1272
1273int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1274 const char **symbol, u64 *probe_offset,
1275 u64 *probe_addr, bool perf_type_tracepoint)
1276{
1277 const char *pevent = trace_event_name(event->tp_event);
1278 const char *group = event->tp_event->class->system;
1279 struct trace_kprobe *tk;
1280
1281 if (perf_type_tracepoint)
1282 tk = find_trace_kprobe(pevent, group);
1283 else
1284 tk = event->tp_event->data;
1285 if (!tk)
1286 return -EINVAL;
1287
1288 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1289 : BPF_FD_TYPE_KPROBE;
1290 if (tk->symbol) {
1291 *symbol = tk->symbol;
1292 *probe_offset = tk->rp.kp.offset;
1293 *probe_addr = 0;
1294 } else {
1295 *symbol = NULL;
1296 *probe_offset = 0;
1297 *probe_addr = (unsigned long)tk->rp.kp.addr;
1298 }
1299 return 0;
1300}
07b139c8 1301#endif /* CONFIG_PERF_EVENTS */
50d78056 1302
3fe3d619
ON
1303/*
1304 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1305 *
1306 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1307 * lockless, but we can't race with this __init function.
1308 */
2425bcb9 1309static int kprobe_register(struct trace_event_call *event,
fbc1963d 1310 enum trace_reg type, void *data)
2239291a 1311{
c31ffb3f 1312 struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
7f1d2f82 1313 struct trace_event_file *file = data;
1538f888 1314
2239291a
SR
1315 switch (type) {
1316 case TRACE_REG_REGISTER:
c31ffb3f 1317 return enable_trace_kprobe(tk, file);
2239291a 1318 case TRACE_REG_UNREGISTER:
c31ffb3f 1319 return disable_trace_kprobe(tk, file);
2239291a
SR
1320
1321#ifdef CONFIG_PERF_EVENTS
1322 case TRACE_REG_PERF_REGISTER:
c31ffb3f 1323 return enable_trace_kprobe(tk, NULL);
2239291a 1324 case TRACE_REG_PERF_UNREGISTER:
c31ffb3f 1325 return disable_trace_kprobe(tk, NULL);
ceec0b6f
JO
1326 case TRACE_REG_PERF_OPEN:
1327 case TRACE_REG_PERF_CLOSE:
489c75c3
JO
1328 case TRACE_REG_PERF_ADD:
1329 case TRACE_REG_PERF_DEL:
ceec0b6f 1330 return 0;
2239291a
SR
1331#endif
1332 }
1333 return 0;
1334}
50d78056 1335
3da0f180 1336static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
50d78056 1337{
c31ffb3f 1338 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
9802d865 1339 int ret = 0;
e08d1c65 1340
a7636d9e 1341 raw_cpu_inc(*tk->nhit);
48182bd2 1342
c31ffb3f
NK
1343 if (tk->tp.flags & TP_FLAG_TRACE)
1344 kprobe_trace_func(tk, regs);
07b139c8 1345#ifdef CONFIG_PERF_EVENTS
c31ffb3f 1346 if (tk->tp.flags & TP_FLAG_PROFILE)
9802d865 1347 ret = kprobe_perf_func(tk, regs);
07b139c8 1348#endif
9802d865 1349 return ret;
50d78056 1350}
3da0f180 1351NOKPROBE_SYMBOL(kprobe_dispatcher);
50d78056 1352
3da0f180
MH
1353static int
1354kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
50d78056 1355{
c31ffb3f 1356 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
50d78056 1357
a7636d9e 1358 raw_cpu_inc(*tk->nhit);
48182bd2 1359
c31ffb3f
NK
1360 if (tk->tp.flags & TP_FLAG_TRACE)
1361 kretprobe_trace_func(tk, ri, regs);
07b139c8 1362#ifdef CONFIG_PERF_EVENTS
c31ffb3f
NK
1363 if (tk->tp.flags & TP_FLAG_PROFILE)
1364 kretprobe_perf_func(tk, ri, regs);
07b139c8 1365#endif
50d78056
MH
1366 return 0; /* We don't tweek kernel, so just return 0 */
1367}
3da0f180 1368NOKPROBE_SYMBOL(kretprobe_dispatcher);
e08d1c65 1369
a9a57763
SR
1370static struct trace_event_functions kretprobe_funcs = {
1371 .trace = print_kretprobe_event
1372};
1373
1374static struct trace_event_functions kprobe_funcs = {
1375 .trace = print_kprobe_event
1376};
1377
e12f03d7
SL
1378static inline void init_trace_event_call(struct trace_kprobe *tk,
1379 struct trace_event_call *call)
413d37d1 1380{
c31ffb3f 1381 if (trace_kprobe_is_return(tk)) {
80decc70 1382 call->event.funcs = &kretprobe_funcs;
2e33af02 1383 call->class->define_fields = kretprobe_event_define_fields;
413d37d1 1384 } else {
80decc70 1385 call->event.funcs = &kprobe_funcs;
2e33af02 1386 call->class->define_fields = kprobe_event_define_fields;
413d37d1 1387 }
e12f03d7
SL
1388
1389 call->flags = TRACE_EVENT_FL_KPROBE;
1390 call->class->reg = kprobe_register;
1391 call->data = tk;
1392}
1393
1394static int register_kprobe_event(struct trace_kprobe *tk)
1395{
46e5376d 1396 init_trace_event_call(tk, &tk->tp.call);
f730e0f2 1397
46e5376d 1398 return trace_probe_register_event_call(&tk->tp);
413d37d1
MH
1399}
1400
c31ffb3f 1401static int unregister_kprobe_event(struct trace_kprobe *tk)
413d37d1 1402{
46e5376d 1403 return trace_probe_unregister_event_call(&tk->tp);
413d37d1
MH
1404}
1405
e12f03d7
SL
1406#ifdef CONFIG_PERF_EVENTS
1407/* create a trace_kprobe, but don't add it to global lists */
1408struct trace_event_call *
1409create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1410 bool is_return)
1411{
1412 struct trace_kprobe *tk;
1413 int ret;
1414 char *event;
1415
1416 /*
6212dd29 1417 * local trace_kprobes are not added to dyn_event, so they are never
e12f03d7
SL
1418 * searched in find_trace_kprobe(). Therefore, there is no concern of
1419 * duplicated name here.
1420 */
1421 event = func ? func : "DUMMY_EVENT";
1422
1423 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1424 offs, 0 /* maxactive */, 0 /* nargs */,
1425 is_return);
1426
1427 if (IS_ERR(tk)) {
1428 pr_info("Failed to allocate trace_probe.(%d)\n",
1429 (int)PTR_ERR(tk));
1430 return ERR_CAST(tk);
1431 }
1432
1433 init_trace_event_call(tk, &tk->tp.call);
1434
0a46c854 1435 if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
e12f03d7
SL
1436 ret = -ENOMEM;
1437 goto error;
1438 }
1439
1440 ret = __register_trace_kprobe(tk);
f730e0f2 1441 if (ret < 0)
e12f03d7
SL
1442 goto error;
1443
1444 return &tk->tp.call;
1445error:
1446 free_trace_kprobe(tk);
1447 return ERR_PTR(ret);
1448}
1449
1450void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1451{
1452 struct trace_kprobe *tk;
1453
1454 tk = container_of(event_call, struct trace_kprobe, tp.call);
1455
1456 if (trace_probe_is_enabled(&tk->tp)) {
1457 WARN_ON(1);
1458 return;
1459 }
1460
1461 __unregister_trace_kprobe(tk);
0fc8c358 1462
e12f03d7
SL
1463 free_trace_kprobe(tk);
1464}
1465#endif /* CONFIG_PERF_EVENTS */
1466
970988e1
MH
1467static __init void enable_boot_kprobe_events(void)
1468{
1469 struct trace_array *tr = top_trace_array();
1470 struct trace_event_file *file;
1471 struct trace_kprobe *tk;
1472 struct dyn_event *pos;
1473
1474 mutex_lock(&event_mutex);
1475 for_each_trace_kprobe(tk, pos) {
1476 list_for_each_entry(file, &tr->events, list)
1477 if (file->event_call == &tk->tp.call)
1478 trace_event_enable_disable(file, 1, 0);
1479 }
1480 mutex_unlock(&event_mutex);
1481}
1482
1483static __init void setup_boot_kprobe_events(void)
1484{
1485 char *p, *cmd = kprobe_boot_events_buf;
1486 int ret;
1487
1488 strreplace(kprobe_boot_events_buf, ',', ' ');
1489
1490 while (cmd && *cmd != '\0') {
1491 p = strchr(cmd, ';');
1492 if (p)
1493 *p++ = '\0';
1494
1495 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1496 if (ret)
1497 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
b6399cc7
SRV
1498 else
1499 kprobe_boot_events_enabled = true;
970988e1
MH
1500
1501 cmd = p;
1502 }
1503
1504 enable_boot_kprobe_events();
1505}
1506
8434dc93 1507/* Make a tracefs interface for controlling probe points */
413d37d1
MH
1508static __init int init_kprobe_trace(void)
1509{
1510 struct dentry *d_tracer;
1511 struct dentry *entry;
6212dd29
MH
1512 int ret;
1513
1514 ret = dyn_event_register(&trace_kprobe_ops);
1515 if (ret)
1516 return ret;
413d37d1 1517
c31ffb3f 1518 if (register_module_notifier(&trace_kprobe_module_nb))
61424318
MH
1519 return -EINVAL;
1520
413d37d1 1521 d_tracer = tracing_init_dentry();
14a5ae40 1522 if (IS_ERR(d_tracer))
413d37d1
MH
1523 return 0;
1524
8434dc93 1525 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
413d37d1
MH
1526 NULL, &kprobe_events_ops);
1527
cd7e7bd5 1528 /* Event list interface */
413d37d1 1529 if (!entry)
a395d6a7 1530 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
cd7e7bd5
MH
1531
1532 /* Profile interface */
8434dc93 1533 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
cd7e7bd5
MH
1534 NULL, &kprobe_profile_ops);
1535
1536 if (!entry)
a395d6a7 1537 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
970988e1
MH
1538
1539 setup_boot_kprobe_events();
1540
413d37d1
MH
1541 return 0;
1542}
1543fs_initcall(init_kprobe_trace);
1544
1545
1546#ifdef CONFIG_FTRACE_STARTUP_TEST
26a346f2 1547static __init struct trace_event_file *
c31ffb3f 1548find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
41a7dd42 1549{
7f1d2f82 1550 struct trace_event_file *file;
41a7dd42
MH
1551
1552 list_for_each_entry(file, &tr->events, list)
c31ffb3f 1553 if (file->event_call == &tk->tp.call)
41a7dd42
MH
1554 return file;
1555
1556 return NULL;
1557}
1558
3fe3d619 1559/*
c31ffb3f 1560 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
3fe3d619
ON
1561 * stage, we can do this lockless.
1562 */
413d37d1
MH
1563static __init int kprobe_trace_self_tests_init(void)
1564{
231e36f4 1565 int ret, warn = 0;
413d37d1 1566 int (*target)(int, int, int, int, int, int);
c31ffb3f 1567 struct trace_kprobe *tk;
7f1d2f82 1568 struct trace_event_file *file;
413d37d1 1569
748ec3a2
YY
1570 if (tracing_is_disabled())
1571 return -ENODEV;
1572
b6399cc7
SRV
1573 if (kprobe_boot_events_enabled) {
1574 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1575 return 0;
1576 }
1577
413d37d1
MH
1578 target = kprobe_trace_selftest_target;
1579
1580 pr_info("Testing kprobe tracing: ");
1581
6212dd29
MH
1582 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1583 create_or_delete_trace_kprobe);
231e36f4 1584 if (WARN_ON_ONCE(ret)) {
41a7dd42 1585 pr_warn("error on probing function entry.\n");
231e36f4
MH
1586 warn++;
1587 } else {
1588 /* Enable trace point */
c31ffb3f
NK
1589 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1590 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1591 pr_warn("error on getting new probe.\n");
231e36f4 1592 warn++;
41a7dd42 1593 } else {
c31ffb3f 1594 file = find_trace_probe_file(tk, top_trace_array());
41a7dd42
MH
1595 if (WARN_ON_ONCE(file == NULL)) {
1596 pr_warn("error on getting probe file.\n");
1597 warn++;
1598 } else
c31ffb3f 1599 enable_trace_kprobe(tk, file);
41a7dd42 1600 }
231e36f4 1601 }
413d37d1 1602
6212dd29
MH
1603 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1604 create_or_delete_trace_kprobe);
231e36f4 1605 if (WARN_ON_ONCE(ret)) {
41a7dd42 1606 pr_warn("error on probing function return.\n");
231e36f4
MH
1607 warn++;
1608 } else {
1609 /* Enable trace point */
c31ffb3f
NK
1610 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1611 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1612 pr_warn("error on getting 2nd new probe.\n");
231e36f4 1613 warn++;
41a7dd42 1614 } else {
c31ffb3f 1615 file = find_trace_probe_file(tk, top_trace_array());
41a7dd42
MH
1616 if (WARN_ON_ONCE(file == NULL)) {
1617 pr_warn("error on getting probe file.\n");
1618 warn++;
1619 } else
c31ffb3f 1620 enable_trace_kprobe(tk, file);
41a7dd42 1621 }
231e36f4
MH
1622 }
1623
1624 if (warn)
1625 goto end;
413d37d1
MH
1626
1627 ret = target(1, 2, 3, 4, 5, 6);
1628
d4d7ccc8
MN
1629 /*
1630 * Not expecting an error here, the check is only to prevent the
1631 * optimizer from removing the call to target() as otherwise there
1632 * are no side-effects and the call is never performed.
1633 */
1634 if (ret != 21)
1635 warn++;
1636
02ca1521 1637 /* Disable trace points before removing it */
c31ffb3f
NK
1638 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1639 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1640 pr_warn("error on getting test probe.\n");
02ca1521 1641 warn++;
41a7dd42 1642 } else {
d4d7ccc8
MN
1643 if (trace_kprobe_nhit(tk) != 1) {
1644 pr_warn("incorrect number of testprobe hits\n");
1645 warn++;
1646 }
1647
c31ffb3f 1648 file = find_trace_probe_file(tk, top_trace_array());
41a7dd42
MH
1649 if (WARN_ON_ONCE(file == NULL)) {
1650 pr_warn("error on getting probe file.\n");
1651 warn++;
1652 } else
c31ffb3f 1653 disable_trace_kprobe(tk, file);
41a7dd42 1654 }
02ca1521 1655
c31ffb3f
NK
1656 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1657 if (WARN_ON_ONCE(tk == NULL)) {
41a7dd42 1658 pr_warn("error on getting 2nd test probe.\n");
02ca1521 1659 warn++;
41a7dd42 1660 } else {
d4d7ccc8
MN
1661 if (trace_kprobe_nhit(tk) != 1) {
1662 pr_warn("incorrect number of testprobe2 hits\n");
1663 warn++;
1664 }
1665
c31ffb3f 1666 file = find_trace_probe_file(tk, top_trace_array());
41a7dd42
MH
1667 if (WARN_ON_ONCE(file == NULL)) {
1668 pr_warn("error on getting probe file.\n");
1669 warn++;
1670 } else
c31ffb3f 1671 disable_trace_kprobe(tk, file);
41a7dd42 1672 }
02ca1521 1673
6212dd29 1674 ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
231e36f4 1675 if (WARN_ON_ONCE(ret)) {
41a7dd42 1676 pr_warn("error on deleting a probe.\n");
231e36f4
MH
1677 warn++;
1678 }
1679
6212dd29 1680 ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
231e36f4 1681 if (WARN_ON_ONCE(ret)) {
41a7dd42 1682 pr_warn("error on deleting a probe.\n");
231e36f4
MH
1683 warn++;
1684 }
413d37d1 1685
231e36f4 1686end:
6212dd29
MH
1687 ret = dyn_events_release_all(&trace_kprobe_ops);
1688 if (WARN_ON_ONCE(ret)) {
1689 pr_warn("error on cleaning up probes.\n");
1690 warn++;
1691 }
30e7d894
TG
1692 /*
1693 * Wait for the optimizer work to finish. Otherwise it might fiddle
1694 * with probes in already freed __init text.
1695 */
1696 wait_for_kprobe_optimizer();
231e36f4
MH
1697 if (warn)
1698 pr_cont("NG: Some tests are failed. Please check them.\n");
1699 else
1700 pr_cont("OK\n");
413d37d1
MH
1701 return 0;
1702}
1703
1704late_initcall(kprobe_trace_self_tests_init);
1705
1706#endif