Commit | Line | Data |
---|---|---|
e5a81b62 SR |
1 | /* |
2 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | |
3 | * | |
4 | */ | |
5 | #include <linux/stacktrace.h> | |
6 | #include <linux/kallsyms.h> | |
7 | #include <linux/seq_file.h> | |
8 | #include <linux/spinlock.h> | |
9 | #include <linux/uaccess.h> | |
10 | #include <linux/debugfs.h> | |
11 | #include <linux/ftrace.h> | |
12 | #include <linux/module.h> | |
f38f1d2a | 13 | #include <linux/sysctl.h> |
e5a81b62 SR |
14 | #include <linux/init.h> |
15 | #include <linux/fs.h> | |
762e1207 SR |
16 | |
17 | #include <asm/setup.h> | |
18 | ||
e5a81b62 SR |
19 | #include "trace.h" |
20 | ||
21 | #define STACK_TRACE_ENTRIES 500 | |
22 | ||
/*
 * Storage for the deepest stack trace observed so far.  One extra slot
 * is reserved so the array is always terminated by ULONG_MAX.
 */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
/* Stack depth in bytes (from the top of the thread stack) per entry. */
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries = STACK_TRACE_ENTRIES,
	.entries = stack_dump_trace,
};

/* Largest stack usage seen, in bytes; exported via "stack_max_size". */
static unsigned long max_stack_size;
/*
 * Raw arch spinlock: taken from within the function tracer callback,
 * so it must not go through lockdep or sleep.
 */
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-cpu recursion guard for the tracer callback and readers. */
static DEFINE_PER_CPU(int, trace_active);
/* Serializes enabling/disabling of the tracer via sysctl. */
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
e5a81b62 | 41 | |
/*
 * check_stack - record a new maximum stack depth if one is seen.
 * @stack: address of a local variable in the caller, used as an
 *         approximation of the current stack pointer.
 *
 * Computes how many bytes of THREAD_SIZE are in use below @stack; if
 * that beats the recorded maximum, saves a fresh stack trace and then
 * walks the thread stack to work out the remaining depth at each
 * traced return address.
 *
 * Runs from the function tracer with preemption disabled and recursion
 * guarded by the caller's per-cpu trace_active counter.
 */
static inline void
check_stack(unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	/* bytes used = THREAD_SIZE minus offset of @stack within the stack */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* raw lock with IRQs off: we may be deep inside the tracer */
	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;	/* skip the tracer's own entry frames */

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			/*
			 * A stack word equal to the trace entry marks the
			 * saved return address, i.e. that frame's position.
			 */
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		/* entry not found on the stack: skip it, keep going */
		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
112 | ||
/*
 * Function-tracer callback: check the current stack depth on every
 * traced function entry.
 *
 * The per-cpu trace_active counter prevents recursing into
 * check_stack() if anything it calls is itself traced.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/* the address of this local approximates the current stack pointer */
	check_stack(&stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
134 | ||
/* ftrace ops hooking stack_trace_call() into the function tracer. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	/* callback has its own recursion protection (trace_active) */
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
140 | ||
141 | static ssize_t | |
142 | stack_max_size_read(struct file *filp, char __user *ubuf, | |
143 | size_t count, loff_t *ppos) | |
144 | { | |
145 | unsigned long *ptr = filp->private_data; | |
146 | char buf[64]; | |
147 | int r; | |
148 | ||
149 | r = snprintf(buf, sizeof(buf), "%ld\n", *ptr); | |
150 | if (r > sizeof(buf)) | |
151 | r = sizeof(buf); | |
152 | return simple_read_from_buffer(ubuf, count, ppos, buf, r); | |
153 | } | |
154 | ||
155 | static ssize_t | |
156 | stack_max_size_write(struct file *filp, const char __user *ubuf, | |
157 | size_t count, loff_t *ppos) | |
158 | { | |
159 | long *ptr = filp->private_data; | |
160 | unsigned long val, flags; | |
e5a81b62 | 161 | int ret; |
4f48f8b7 | 162 | int cpu; |
e5a81b62 | 163 | |
22fe9b54 PH |
164 | ret = kstrtoul_from_user(ubuf, count, 10, &val); |
165 | if (ret) | |
e5a81b62 SR |
166 | return ret; |
167 | ||
a5e25883 | 168 | local_irq_save(flags); |
4f48f8b7 LJ |
169 | |
170 | /* | |
171 | * In case we trace inside arch_spin_lock() or after (NMI), | |
172 | * we will cause circular lock, so we also need to increase | |
173 | * the percpu trace_active here. | |
174 | */ | |
175 | cpu = smp_processor_id(); | |
176 | per_cpu(trace_active, cpu)++; | |
177 | ||
0199c4e6 | 178 | arch_spin_lock(&max_stack_lock); |
e5a81b62 | 179 | *ptr = val; |
0199c4e6 | 180 | arch_spin_unlock(&max_stack_lock); |
4f48f8b7 LJ |
181 | |
182 | per_cpu(trace_active, cpu)--; | |
a5e25883 | 183 | local_irq_restore(flags); |
e5a81b62 SR |
184 | |
185 | return count; | |
186 | } | |
187 | ||
/* File operations for the debugfs "stack_max_size" entry. */
static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};
194 | ||
195 | static void * | |
2fc5f0cf | 196 | __next(struct seq_file *m, loff_t *pos) |
e5a81b62 | 197 | { |
2fc5f0cf | 198 | long n = *pos - 1; |
e5a81b62 | 199 | |
2fc5f0cf | 200 | if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX) |
e5a81b62 SR |
201 | return NULL; |
202 | ||
2fc5f0cf | 203 | m->private = (void *)n; |
1b6cced6 | 204 | return &m->private; |
e5a81b62 SR |
205 | } |
206 | ||
2fc5f0cf LZ |
207 | static void * |
208 | t_next(struct seq_file *m, void *v, loff_t *pos) | |
e5a81b62 | 209 | { |
2fc5f0cf LZ |
210 | (*pos)++; |
211 | return __next(m, pos); | |
212 | } | |
e5a81b62 | 213 | |
/*
 * seq_file .start: take the locks protecting the saved trace.
 *
 * IRQs are disabled and the per-cpu trace_active count is raised
 * before taking max_stack_lock so the tracer callback cannot fire on
 * this cpu and deadlock on the same raw lock.  t_stop() undoes all of
 * this in reverse order.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	/* position 0 produces the header line */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
230 | ||
/*
 * seq_file .stop: release everything taken in t_start(), in reverse
 * order (unlock, drop the recursion guard, re-enable IRQs).
 */
static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
242 | ||
1b6cced6 | 243 | static int trace_lookup_stack(struct seq_file *m, long i) |
e5a81b62 | 244 | { |
1b6cced6 | 245 | unsigned long addr = stack_dump_trace[i]; |
e5a81b62 | 246 | |
151772db | 247 | return seq_printf(m, "%pS\n", (void *)addr); |
e5a81b62 SR |
248 | } |
249 | ||
/* Emit a comment block telling the user how to enable the tracer. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
260 | ||
/*
 * seq_file .show: print the table header (for SEQ_START_TOKEN), or one
 * line of the saved maximum-depth stack trace: remaining depth, frame
 * size, and the symbol.
 *
 * NOTE(review): the column spacing inside the format strings below may
 * have been mangled by extraction — verify against the original file.
 */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, " Depth Size Location"
			   " (%d entries)\n"
			   " ----- ---- --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	/* __next() stored the 0-based entry index in m->private */
	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	/* frame size = this entry's depth minus the next entry's depth */
	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
296 | ||
/* seq_file iterator for the debugfs "stack_trace" output. */
static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
303 | ||
/* Open "stack_trace": plain seq_file, no per-open private state. */
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
308 | ||
/* File operations for the debugfs "stack_trace" entry. */
static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
315 | ||
/*
 * Open "stack_trace_filter": reuse the generic ftrace filter-file
 * machinery against this tracer's ftrace_ops.
 */
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
322 | ||
/* File operations for the debugfs "stack_trace_filter" entry. */
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
330 | ||
f38f1d2a SR |
331 | int |
332 | stack_trace_sysctl(struct ctl_table *table, int write, | |
8d65af78 | 333 | void __user *buffer, size_t *lenp, |
f38f1d2a SR |
334 | loff_t *ppos) |
335 | { | |
336 | int ret; | |
337 | ||
338 | mutex_lock(&stack_sysctl_mutex); | |
339 | ||
8d65af78 | 340 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
f38f1d2a SR |
341 | |
342 | if (ret || !write || | |
a32c7765 | 343 | (last_stack_tracer_enabled == !!stack_tracer_enabled)) |
f38f1d2a SR |
344 | goto out; |
345 | ||
a32c7765 | 346 | last_stack_tracer_enabled = !!stack_tracer_enabled; |
f38f1d2a SR |
347 | |
348 | if (stack_tracer_enabled) | |
349 | register_ftrace_function(&trace_ops); | |
350 | else | |
351 | unregister_ftrace_function(&trace_ops); | |
352 | ||
353 | out: | |
354 | mutex_unlock(&stack_sysctl_mutex); | |
355 | return ret; | |
356 | } | |
357 | ||
762e1207 SR |
358 | static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata; |
359 | ||
f38f1d2a SR |
360 | static __init int enable_stacktrace(char *str) |
361 | { | |
762e1207 SR |
362 | if (strncmp(str, "_filter=", 8) == 0) |
363 | strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE); | |
364 | ||
e05a43b7 SR |
365 | stack_tracer_enabled = 1; |
366 | last_stack_tracer_enabled = 1; | |
f38f1d2a SR |
367 | return 1; |
368 | } | |
369 | __setup("stacktrace", enable_stacktrace); | |
370 | ||
e5a81b62 SR |
371 | static __init int stack_trace_init(void) |
372 | { | |
373 | struct dentry *d_tracer; | |
e5a81b62 SR |
374 | |
375 | d_tracer = tracing_init_dentry(); | |
376 | ||
5452af66 FW |
377 | trace_create_file("stack_max_size", 0644, d_tracer, |
378 | &max_stack_size, &stack_max_size_fops); | |
e5a81b62 | 379 | |
5452af66 FW |
380 | trace_create_file("stack_trace", 0444, d_tracer, |
381 | NULL, &stack_trace_fops); | |
e5a81b62 | 382 | |
d2d45c7a SR |
383 | trace_create_file("stack_trace_filter", 0444, d_tracer, |
384 | NULL, &stack_trace_filter_fops); | |
385 | ||
762e1207 SR |
386 | if (stack_trace_filter_buf[0]) |
387 | ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1); | |
388 | ||
e05a43b7 | 389 | if (stack_tracer_enabled) |
f38f1d2a | 390 | register_ftrace_function(&trace_ops); |
e5a81b62 SR |
391 | |
392 | return 0; | |
393 | } | |
394 | ||
395 | device_initcall(stack_trace_init); |