tracing: Have stack_tracer use a separate list of functions
kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

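/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far (terminated with ULONG_MAX), and stack_dump_index[]
 * records the depth in bytes, measured from the top of the thread's
 * stack, at which check_stack() found each of those addresses.
 */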
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

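/*
 * check_stack() measures how much of the current thread's stack is in
 * use.  When it sees a new maximum it records a backtrace in
 * stack_dump_trace[] and then walks the raw stack words to find the
 * depth at which each traced address lives, so t_show() can print a
 * per-frame size breakdown.
 */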
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

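/*
 * stack_trace_call() is the ftrace callback attached through trace_ops.
 * The per-CPU trace_active counter keeps it from recursing into itself,
 * and lets stack_max_size_write() and the seq_file iterator block it
 * while they hold max_stack_lock.
 */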
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

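/*
 * seq_file iterator for the stack_trace file: t_start() disables
 * interrupts, bumps trace_active and takes max_stack_lock so the
 * recorded snapshot cannot change while it is being printed; __next()
 * walks stack_dump_trace[] until it hits ULONG_MAX or nr_entries.
 */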
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

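/*
 * Example stack_trace output (illustrative values and symbol names,
 * not real data):
 *
 *         Depth    Size   Location    (4 entries)
 *         -----    ----   --------
 *   0)     1512      96   some_leaf_function+0x32/0xa0
 *   1)     1416     168   some_midlevel_function+0x5b/0x1c0
 *   2)     1248     744   some_syscall_handler+0x21/0x90
 *   3)      504     504   some_entry_stub+0x16/0x1b
 */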
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

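/*
 * stack_trace_filter gives the stack tracer its own list of functions
 * to check: ftrace_regex_open() is passed trace_ops, so the filter
 * applies only to this tracer's ops, separate from the function
 * tracer's filter.
 */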
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

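/*
 * Adding "stacktrace" to the kernel command line enables the stack
 * tracer at boot; stack_trace_init() then registers trace_ops.
 */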
static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

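/*
 * Typical usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug; the paths below are the usual defaults, not
 * guaranteed by this file):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size      (reset the max)
 *   # echo 'ext4_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */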
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);