/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *func_trace;

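/*
 * Start tracing: reset the per-cpu ring buffers while pinned to the
 * current CPU, then enable cmdline recording and the function entry
 * hook.  func_trace is cached above so the per-function callbacks
 * further down can reach the trace_array without taking any locks.
 */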
static void start_function_trace(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

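/*
 * Tracer lifecycle callbacks wired into the struct tracer definition at
 * the bottom of this file: ->init and ->reset start and stop the whole
 * tracer, while ->start only clears the online per-cpu buffers again.
 */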
static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

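/*
 * Callback used when the TRACE_ITER_PREEMPTONLY flag is set: it only
 * disables preemption (via ftrace_preempt_disable()) around the
 * recording instead of masking local interrupts.  The per-cpu
 * "disabled" counter still prevents the same CPU from recursing into
 * the ring buffer.
 */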
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

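/*
 * Default per-function callback: record a function entry event with
 * local interrupts disabled.  The per-cpu "disabled" counter makes a
 * nested invocation on the same CPU (e.g. from an NMI) bail out before
 * touching the ring buffer.
 */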
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

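/*
 * Callback behind the "func_stack_trace" option.  It lives in a
 * separate ftrace_ops (trace_stack_ops, toggled in func_set_flag()
 * below), so while the option is set each traced function also gets a
 * stack trace recorded next to its normal entry.
 */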
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

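/*
 * Hook the function tracer into ftrace.  The callback is chosen at
 * start time based on the TRACE_ITER_PREEMPTONLY flag; the
 * ftrace_function_enabled gate is cleared first so a callback that
 * fires during the switch is ignored.
 */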
void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

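/*
 * Toggle handler for the options above.  Flipping func_stack_trace
 * simply registers or unregisters the stack-trace ftrace_ops; any
 * other bit is rejected.
 */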
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set)
			register_ftrace_function(&trace_stack_ops);
		else
			unregister_ftrace_function(&trace_stack_ops);

		return 0;
	}

	return -EINVAL;
}

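/*
 * The tracer definition itself.  Usage from user space is the normal
 * ftrace interface; assuming debugfs is mounted at /sys/kernel/debug,
 * something like:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo function > current_tracer
 *	# echo 1 > options/func_stack_trace	(optional, CONFIG_STACKTRACE)
 *	# cat trace
 *
 * selects this tracer and dumps the recorded function calls.
 */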
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);