/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

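/*
 * With -mfentry the compiler calls the tracer before the traced
 * function has set up its own stack frame, so fentry kernels must
 * treat the traced address differently (see stack_trace_call()).
 */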
#ifdef CC_USING_FENTRY
# define fentry 1
#else
# define fentry 0
#endif

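/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far; stack_dump_index[i] holds the stack depth in bytes at
 * which entry i was found.
 */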
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

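/*
 * trace_active is a per-CPU recursion counter: the tracer callback and
 * any path that takes max_stack_lock bumps it, so that a nested trace
 * (for example from an NMI) backs off instead of deadlocking.
 */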
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i;

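        /*
         * @stack points at a local variable in our caller, so its
         * offset within the THREAD_SIZE-aligned stack area tells us
         * how much of the stack is currently in use (stacks grow down).
         */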
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

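        /*
         * Take a fresh snapshot of the current stack; the skip value
         * drops the stack tracer's own innermost frames from the trace.
         */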
        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Add the passed in ip from the function tracer.
         * Searching for this on the stack will skip over
         * most of the overhead from the stack tracer itself.
         */
        stack_dump_trace[0] = ip;
        max_stack_trace.nr_entries++;

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. An entry may for some reason
         * be missed on the stack, so we may have to account for that.
         * If all entries are present, this loop will only run once.
         * This code only runs on a new max, so it is far from a
         * fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame) && i == 1) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        max_stack_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        /*
         * When fentry is used, the traced function does not get
         * its stack frame set up, and we lose the parent.
         * The ip is pretty useless because the function tracer
         * was called before that function set up its stack frame.
         * In this case, we use the parent ip.
         *
         * By adding the return address of either the parent ip
         * or the current ip we can disregard most of the stack usage
         * caused by the stack tracer itself.
         *
         * The function tracer always reports the address of where the
         * mcount call was, but the stack will hold the return address.
         */
        if (fentry)
                ip = parent_ip;
        else
                ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

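/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells the ftrace core that
 * stack_trace_call() performs its own recursion protection (the
 * trace_active counter), so no extra recursion guard is wrapped
 * around the callback.
 */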
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

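/*
 * t_start() and t_stop() bracket the whole seq_file walk: interrupts
 * stay disabled, trace_active is bumped so the tracer cannot recurse
 * on max_stack_lock, and the lock is held so the snapshot cannot
 * change while it is being printed.
 */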
static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

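/*
 * Each row shows Depth (bytes of stack in use at this entry) and Size
 * (the difference to the next entry, i.e. the stack consumed by this
 * function's own frame).
 */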
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries - 1);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

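/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * stack tracer at boot; "stacktrace_filter=<function-list>" also
 * restricts tracing to the listed functions.
 */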
static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
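
/*
 * Example usage (a sketch; paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *     ... run a workload ...
 *   # cat /sys/kernel/debug/tracing/stack_trace
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size   (reset the max)
 */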