// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

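/*
 * Map the currently set tracer options (stack tracing and/or folding of
 * repeated entries) onto the matching ftrace callback. Returns NULL for
 * an unsupported combination.
 */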
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

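/*
 * The "no-repeats" option needs a per-CPU buffer that remembers the last
 * traced ip/parent_ip pair. Allocate it lazily the first time the option
 * is enabled; return false if the allocation fails.
 */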
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 * function_stack_trace_call()
 * ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 * __trace_stack()
 * function_stack_trace_call()
 * ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

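/*
 * Check whether this call repeats the previously traced ip/parent_ip pair
 * on this CPU. If so, only bump the repeat counter and remember the
 * timestamp instead of emitting another function entry.
 */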
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

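/*
 * Flush any pending repeat count as a separate "function repeats" event
 * and remember the new ip/parent_ip pair for the next comparison.
 */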
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

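/*
 * Options exposed via trace_options while the function tracer is the
 * current tracer, e.g. "echo func_stack_trace > trace_options".
 */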
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

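/*
 * Runtime toggling of a tracer option. When the function tracer is the
 * current tracer, swap in the callback matching the new flag set by
 * unregistering and re-registering tr->ops.
 */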
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

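/*
 * What follows implements the function-triggered commands that can be
 * attached to individual functions through set_ftrace_filter:
 * traceon, traceoff, stacktrace, dump and cpudump.
 */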
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 * function_trace_probe_call()
 * ftrace_ops_assist_func()
 * ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 * __trace_stack()
 * ftrace_stacktrace()
 * function_trace_probe_call()
 * ftrace_ops_assist_func()
 * ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

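/*
 * init/free callbacks shared by the counted probes: each probe keeps an
 * ftrace_func_mapper that maps a traced ip to its remaining count.
 */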
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func	= ftrace_traceon_count,
	.print	= ftrace_traceon_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func	= ftrace_traceoff_count,
	.print	= ftrace_traceoff_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func	= ftrace_stacktrace_count,
	.print	= ftrace_stacktrace_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func	= ftrace_dump_probe,
	.print	= ftrace_dump_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func	= ftrace_cpudump_probe,
	.print	= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func	= ftrace_stacktrace,
	.print	= ftrace_stacktrace_print,
};

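/*
 * Common argument parsing for the probe commands, written as
 *   <function>:<command>[:<count>]
 * to set_ftrace_filter, e.g. "echo 'schedule:traceoff:5' > set_ftrace_filter".
 * The optional count arrives here as @param.
 */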
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}