linux-2.6-block.git: kernel/trace/trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

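/*
 * Overview: the stack tracer registers a callback (stack_trace_call()
 * below) with ftrace so it runs on every traced function entry. Each
 * call measures how much of the current thread stack is in use, and
 * check_stack() records the deepest stack seen, entry by entry, for
 * dumping through the stack_trace and stack_max_size tracefs files.
 */
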
bb99d8cc 43void stack_trace_print(void)
e3172181
MK
44{
45 long i;
46 int size;
47
48 pr_emerg(" Depth Size Location (%d entries)\n"
49 " ----- ---- --------\n",
bb99d8cc 50 stack_trace_max.nr_entries);
e3172181 51
bb99d8cc 52 for (i = 0; i < stack_trace_max.nr_entries; i++) {
e3172181
MK
53 if (stack_dump_trace[i] == ULONG_MAX)
54 break;
bb99d8cc 55 if (i+1 == stack_trace_max.nr_entries ||
e3172181 56 stack_dump_trace[i+1] == ULONG_MAX)
bb99d8cc 57 size = stack_trace_index[i];
e3172181 58 else
bb99d8cc 59 size = stack_trace_index[i] - stack_trace_index[i+1];
e3172181 60
bb99d8cc 61 pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
e3172181
MK
62 size, (void *)stack_dump_trace[i]);
63 }
64}
65
/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

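	/*
	 * Thread stacks are THREAD_SIZE aligned, so the low bits of an
	 * on-stack address give its offset from the stack's base, e.g.
	 * with THREAD_SIZE = 16K and an address 4K above the base,
	 * THREAD_SIZE - 4K = 12K of stack is in use.
	 */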
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

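	/*
	 * A raw arch_spinlock_t is used here instead of a normal
	 * spinlock: this path runs from the function tracer itself, and
	 * the arch lock avoids lockdep and the other instrumentation
	 * that a regular spinlock could recurse into.
	 */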
	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may for some reason be
	 * missing from the stack, so we may have to account for that.
	 * If they are all there, this loop will only happen once. This
	 * code only takes place on a new max, so it is far from a fast
	 * path.
	 */
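	/*
	 * Example: on a 64-bit kernel, if the return address of entry i
	 * is found at slot p, its recorded depth is (top - p) * 8 bytes;
	 * the per-entry Size column printed later is the difference
	 * between two consecutive depths.
	 */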
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
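
/*
 * The __setup() string is matched as a prefix, so both of these work
 * on the kernel command line:
 *
 *	stacktrace
 *	stacktrace_filter=<function-list>
 *
 * The filter list is saved until ftrace is ready and then applied via
 * ftrace_set_early_filter() in stack_trace_init() below.
 */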

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
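
/*
 * Example usage, assuming tracefs/debugfs is mounted in the usual
 * location (/sys/kernel/debug/tracing):
 *
 *	echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	cat /sys/kernel/debug/tracing/stack_max_size
 *	cat /sys/kernel/debug/tracing/stack_trace
 *	echo 0 > /sys/kernel/debug/tracing/stack_max_size	(reset the max)
 */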