Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1b29b018 SR |
2 | /* |
3 | * ring buffer based function tracer | |
4 | * | |
5 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
6 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | |
7 | * | |
8 | * Based on code from the latency_tracer, that is: | |
9 | * | |
10 | * Copyright (C) 2004-2006 Ingo Molnar | |
6d49e352 | 11 | * Copyright (C) 2004 Nadia Yvette Chambers |
1b29b018 | 12 | */ |
23b4ff3a | 13 | #include <linux/ring_buffer.h> |
1b29b018 SR |
14 | #include <linux/debugfs.h> |
15 | #include <linux/uaccess.h> | |
16 | #include <linux/ftrace.h> | |
f20a5806 | 17 | #include <linux/slab.h> |
2e0f5761 | 18 | #include <linux/fs.h> |
1b29b018 SR |
19 | |
20 | #include "trace.h" | |
21 | ||
f20a5806 SRRH |
/* Forward declarations: start/stop toggling and the two trace callbacks. */
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
/* Defined below; holds this tracer's option bits (see func_opts). */
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,	/* also record a stack trace per entry */
};
36 | ||
37 | static int allocate_ftrace_ops(struct trace_array *tr) | |
38 | { | |
39 | struct ftrace_ops *ops; | |
a225cdd2 | 40 | |
f20a5806 SRRH |
41 | ops = kzalloc(sizeof(*ops), GFP_KERNEL); |
42 | if (!ops) | |
43 | return -ENOMEM; | |
53614991 | 44 | |
f20a5806 SRRH |
45 | /* Currently only the non stack verision is supported */ |
46 | ops->func = function_trace_call; | |
345ddcc8 | 47 | ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID; |
f20a5806 SRRH |
48 | |
49 | tr->ops = ops; | |
50 | ops->private = tr; | |
51 | return 0; | |
52 | } | |
a225cdd2 | 53 | |
591dffda SRRH |
54 | |
/*
 * Create the per-instance function-filter control files under @parent.
 * The top-level instance is skipped: it uses global_ops, whose files
 * are created at boot.  Returns 0 on success or a negative errno.
 */
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
75 | ||
/* Tear down the filter files and free the per-instance ftrace_ops. */
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;	/* clear the dangling pointer in tr */
}
82 | ||
/*
 * Tracer ->init: pick the plain or stack-recording callback, install
 * it as this array's ftrace ops and start tracing.  Returns 0 on
 * success, -ENOMEM if the instance's ops allocation failed earlier.
 */
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	/* Record the current CPU; get_cpu()/put_cpu() just bracket the read. */
	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
111 | ||
/* Tracer ->reset: stop tracing and restore the array's default ops. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
118 | ||
/* Tracer ->start: discard any previously recorded events. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
123 | ||
/*
 * The callback invoked on every traced function entry.  Records a
 * function-entry event unless this CPU's buffer is disabled.  Runs
 * with preemption disabled and uses the recursion-protection helpers
 * so it does not trace itself recursively.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/* Snapshot preempt_count before disabling preemption below. */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Bail if this context is already inside the callback. */
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
156 | ||
/*
 * Like function_trace_call() but also records a stack trace per entry.
 * Protects against reentry with the per-cpu ->disabled counter
 * (incremented with irqs off) instead of the recursion helpers.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	/* Only the first (non-nested) entry on this CPU records anything. */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
197 | ||
53614991 SR |
/* Option table exposed in the "options" directory of tracefs. */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
209 | ||
static void tracing_start_function_trace(struct trace_array *tr)
{
	/*
	 * Register with function_enabled cleared so the callback ignores
	 * hits that occur while registration is still in progress, then
	 * flip it on.
	 */
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}
216 | ||
static void tracing_stop_function_trace(struct trace_array *tr)
{
	/* Disable the callback first so late hits are ignored. */
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
222 | ||
d39cdd20 CH |
223 | static struct tracer function_trace; |
224 | ||
8c1a49ae SRRH |
225 | static int |
226 | func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) | |
53614991 | 227 | { |
f555f123 AV |
228 | switch (bit) { |
229 | case TRACE_FUNC_OPT_STACK: | |
53614991 SR |
230 | /* do nothing if already set */ |
231 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | |
f555f123 | 232 | break; |
53614991 | 233 | |
d39cdd20 CH |
234 | /* We can change this flag when not running. */ |
235 | if (tr->current_trace != &function_trace) | |
236 | break; | |
237 | ||
f20a5806 SRRH |
238 | unregister_ftrace_function(tr->ops); |
239 | ||
3eb36aa0 | 240 | if (set) { |
4104d326 | 241 | tr->ops->func = function_stack_trace_call; |
f20a5806 | 242 | register_ftrace_function(tr->ops); |
3eb36aa0 | 243 | } else { |
4104d326 | 244 | tr->ops->func = function_trace_call; |
f20a5806 | 245 | register_ftrace_function(tr->ops); |
3eb36aa0 | 246 | } |
53614991 | 247 | |
f555f123 AV |
248 | break; |
249 | default: | |
250 | return -EINVAL; | |
53614991 SR |
251 | } |
252 | ||
f555f123 | 253 | return 0; |
53614991 SR |
254 | } |
255 | ||
/* The "function" tracer definition registered with the tracing core. */
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
269 | ||
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Shared implementation of the traceon/traceoff counting probes:
 * switch tracing to @on (or off) and decrement this ip's remaining
 * count in the mapper.
 */
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	/*
	 * NOTE(review): count is dereferenced without a NULL check — this
	 * relies on ftrace_count_init() having added a mapping for every
	 * ip this probe can fire on; confirm that invariant holds.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
331 | ||
/* Probe handler for "traceon:<count>" triggers. */
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

/* Probe handler for "traceoff:<count>" triggers. */
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
347 | ||
/* Unlimited "traceon" probe: turn tracing on if it is currently off. */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

/* Unlimited "traceoff" probe: turn tracing off if it is currently on. */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
369 | ||
/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

/* Record a stack trace from probe context, skipping the probe frames. */
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
}
389 | ||
/* Unlimited "stacktrace" probe: record a stack trace on every hit. */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
397 | ||
/*
 * "stacktrace:<count>" probe handler: record a stack trace at most
 * <count> times.  cmpxchg() on the per-ip counter ensures that only one
 * of several racing CPUs "wins" each decrement and records a trace.
 */
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	/*
	 * NOTE(review): count is assumed non-NULL here — relies on
	 * ftrace_count_init() having populated the mapper; verify.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		/* cmpxchg returns the prior value; equality means we won. */
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
439 | ||
/*
 * Decrement the per-ip counter for probes that fire a bounded number
 * of times.  Returns 1 when the probe should still fire (no counter
 * means unlimited), 0 once the counter is exhausted.
 */
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;

	/* No mapper: this probe is unlimited. */
	if (!mapper)
		return 1;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (!count)
		return 1;

	/* Counter exhausted: suppress the probe. */
	if (*count <= 0)
		return 0;

	(*count)--;
	return 1;
}
457 | ||
/* "dump" probe: dump all CPU buffers to console, bounded by the count. */
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
476 | ||
/*
 * Common seq_file output for probe ->print handlers:
 * "<func>:<name>:count=N" or "<func>:<name>:unlimited".
 */
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
497 | ||
/* Thin ->print adapters: each just supplies its command name. */
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
533 | ||
534 | ||
/*
 * ->init for counting probes: lazily allocate the ip->count mapper the
 * first time a probe is attached, then record init_data for this ip.
 */
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
550 | ||
/*
 * ->free for counting probes.  A zero ip means the whole probe is
 * going away: release the entire mapper.  Otherwise drop just this
 * ip's entry.
 */
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
564 | ||
/* Counting variants carry a per-ip counter via ftrace_count_init/free. */
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

/*
 * NOTE(review): unlike dump_probe_ops, this has no .init/.free, so the
 * "1" count passed by ftrace_cpudump_callback() is never stored in a
 * mapper — it appears to dump on every hit despite the "Only dump
 * once" comment at the registration site; confirm intent.
 */
static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

/* Unlimited variants: no counter state to manage. */
static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
e110e3d1 | 612 | |
/*
 * Common registration path for all function-triggered probes.
 * Handles "!" (unregister), parses the optional ":<count>" parameter,
 * and registers @ops for the functions matching @glob.
 * Returns 0 on success or a negative errno.
 */
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	/* A leading '!' removes a previously registered probe. */
	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	/* register returns the number of functions hooked on success */
	return ret < 0 ? ret : 0;
}
651 | ||
dd42cd3e | 652 | static int |
04ec7bb6 | 653 | ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash, |
dd42cd3e SRRH |
654 | char *glob, char *cmd, char *param, int enable) |
655 | { | |
656 | struct ftrace_probe_ops *ops; | |
657 | ||
0f179765 SRV |
658 | if (!tr) |
659 | return -ENODEV; | |
660 | ||
dd42cd3e SRRH |
661 | /* we register both traceon and traceoff to this callback */ |
662 | if (strcmp(cmd, "traceon") == 0) | |
663 | ops = param ? &traceon_count_probe_ops : &traceon_probe_ops; | |
664 | else | |
665 | ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops; | |
666 | ||
04ec7bb6 | 667 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
dd42cd3e SRRH |
668 | param, enable); |
669 | } | |
670 | ||
671 | static int | |
04ec7bb6 | 672 | ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash, |
dd42cd3e SRRH |
673 | char *glob, char *cmd, char *param, int enable) |
674 | { | |
675 | struct ftrace_probe_ops *ops; | |
676 | ||
0f179765 SRV |
677 | if (!tr) |
678 | return -ENODEV; | |
679 | ||
dd42cd3e SRRH |
680 | ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; |
681 | ||
04ec7bb6 | 682 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
dd42cd3e SRRH |
683 | param, enable); |
684 | } | |
685 | ||
ad71d889 | 686 | static int |
04ec7bb6 | 687 | ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
ad71d889 SRRH |
688 | char *glob, char *cmd, char *param, int enable) |
689 | { | |
690 | struct ftrace_probe_ops *ops; | |
691 | ||
0f179765 SRV |
692 | if (!tr) |
693 | return -ENODEV; | |
694 | ||
ad71d889 SRRH |
695 | ops = &dump_probe_ops; |
696 | ||
697 | /* Only dump once. */ | |
04ec7bb6 | 698 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
ad71d889 SRRH |
699 | "1", enable); |
700 | } | |
701 | ||
90e3c03c | 702 | static int |
04ec7bb6 | 703 | ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
90e3c03c SRRH |
704 | char *glob, char *cmd, char *param, int enable) |
705 | { | |
706 | struct ftrace_probe_ops *ops; | |
707 | ||
0f179765 SRV |
708 | if (!tr) |
709 | return -ENODEV; | |
710 | ||
90e3c03c SRRH |
711 | ops = &cpudump_probe_ops; |
712 | ||
713 | /* Only dump once. */ | |
04ec7bb6 | 714 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
90e3c03c SRRH |
715 | "1", enable); |
716 | } | |
717 | ||
/* Command table entries wired into set_ftrace_filter. */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
742 | ||
23b4ff3a SR |
743 | static int __init init_func_cmd_traceon(void) |
744 | { | |
745 | int ret; | |
746 | ||
747 | ret = register_ftrace_command(&ftrace_traceoff_cmd); | |
748 | if (ret) | |
749 | return ret; | |
750 | ||
751 | ret = register_ftrace_command(&ftrace_traceon_cmd); | |
752 | if (ret) | |
ad71d889 | 753 | goto out_free_traceoff; |
dd42cd3e SRRH |
754 | |
755 | ret = register_ftrace_command(&ftrace_stacktrace_cmd); | |
ad71d889 SRRH |
756 | if (ret) |
757 | goto out_free_traceon; | |
758 | ||
759 | ret = register_ftrace_command(&ftrace_dump_cmd); | |
760 | if (ret) | |
761 | goto out_free_stacktrace; | |
762 | ||
90e3c03c SRRH |
763 | ret = register_ftrace_command(&ftrace_cpudump_cmd); |
764 | if (ret) | |
765 | goto out_free_dump; | |
766 | ||
ad71d889 SRRH |
767 | return 0; |
768 | ||
90e3c03c SRRH |
769 | out_free_dump: |
770 | unregister_ftrace_command(&ftrace_dump_cmd); | |
ad71d889 SRRH |
771 | out_free_stacktrace: |
772 | unregister_ftrace_command(&ftrace_stacktrace_cmd); | |
773 | out_free_traceon: | |
774 | unregister_ftrace_command(&ftrace_traceon_cmd); | |
775 | out_free_traceoff: | |
776 | unregister_ftrace_command(&ftrace_traceoff_cmd); | |
777 | ||
23b4ff3a SR |
778 | return ret; |
779 | } | |
#else
/* Without dynamic ftrace there are no func commands to register. */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
786 | ||
dbeafd0d | 787 | __init int init_function_trace(void) |
1b29b018 | 788 | { |
23b4ff3a | 789 | init_func_cmd_traceon(); |
1b29b018 SR |
790 | return register_tracer(&function_trace); |
791 | } |