kernel/trace/trace_stack.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

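/*
 * User interface (see stack_trace_init() below): the tracer is switched on
 * and off via /proc/sys/kernel/stack_tracer_enabled or the "stacktrace"
 * boot parameter, and the recorded maximum is exposed through the
 * stack_trace, stack_max_size and (with dynamic ftrace) stack_trace_filter
 * files in the tracing directory.
 */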
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

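/*
 * Emergency dump of the deepest stack recorded so far; called from
 * check_stack() below when the end of the task stack is found to be
 * corrupted, just before BUG().
 */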
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * The stack tracer looks for a maximum stack at each call from a function. It
 * registers a callback from ftrace, and in that callback it examines the stack
 * size. It determines the stack size from the variable passed in, which is the
 * address of a local variable in the stack_trace_call() callback function.
 * The stack size is calculated from the address of that local variable to the
 * top of the current stack. If that size is smaller than the currently saved
 * max stack size, nothing more is done.
 *
 * If the size of the stack is greater than the maximum recorded size, then the
 * following algorithm takes place.
 *
 * For architectures (like x86) that store the function's return address before
 * saving the function's local variables, the stack will look something like
 * this:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: return addr to entry code
 *   11: start of sys_foo frame
 *   20: return addr to sys_foo
 *   21: start of kernel_func_bar frame
 *   30: return addr to kernel_func_bar
 *   31: [ do trace stack here ]
 *
 * stack_trace_save() is called, returning all the functions it finds in the
 * current stack, which would be (from the bottom of the stack to the top):
 *
 *   return addr to kernel_func_bar
 *   return addr to sys_foo
 *   return addr to entry code
 *
 * Now, to figure out the size of each function's local variables, a search of
 * the stack is made to find these return addresses. When a match is made, it
 * is added to the stack_dump_trace[] array. The offset into the stack is saved
 * in the stack_trace_index[] array. The above example would show:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          30
 *  return addr to sys_foo          |          20
 *  return addr to entry            |          10
 *
 * The print_max_stack() function above uses these values to print the size of
 * each function's portion of the stack.
 *
 *  for (i = 0; i < nr_entries; i++) {
 *     size = i == nr_entries - 1 ? stack_trace_index[i] :
 *                  stack_trace_index[i] - stack_trace_index[i+1];
 *     print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i];
 *  }
 *
 * The above shows
 *
 *     depth size location
 *     ----- ---- --------
 *  0    30   10  kernel_func_bar
 *  1    20   10  sys_foo
 *  2    10   10  entry code
 *
 * Now for architectures that might save the return address after the
 * function's local variables (saving the link register before calling nested
 * functions), this will cause the stack to look a little different:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: start of sys_foo frame
 *   19: return addr to entry code << lr saved before calling kernel_func_bar
 *   20: start of kernel_func_bar frame
 *   29: return addr to sys_foo << lr saved before calling next function
 *   30: [ do trace stack here ]
 *
 * Although the functions returned by stack_trace_save() may be the same, the
 * placement in the stack will be different. Using the same algorithm as above
 * would yield:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          30
 *  return addr to sys_foo          |          29
 *  return addr to entry            |          19
 *
 * Where the mapping is off by one:
 *
 *   kernel_func_bar stack frame size is 29 - 19, not 30 - 29!
 *
 * To fix this, if the architecture sets ARCH_FTRACE_SHIFT_STACK_TRACER the
 * values in stack_trace_index[] are shifted by one and the number of stack
 * trace entries is decremented by one.
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          29
 *  return addr to sys_foo          |          19
 *
 * Although the entry function is not displayed, the first function (sys_foo)
 * will still include the entry function's stack size in its own.
 */
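/*
 * check_stack() implements the algorithm described above. @ip is the traced
 * call site (used to skip the tracer's own frames in the saved trace) and
 * @stack is the address of a local variable in stack_trace_call(), which
 * marks the current depth of the stack.
 */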
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					ARRAY_SIZE(stack_dump_trace) - 1,
					0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

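/*
 * The ftrace callback: runs on (nearly) every traced function entry. The
 * address of the local variable "stack" gives the current stack depth that
 * check_stack() measures against the recorded maximum. Recursion is
 * prevented with the per-CPU disable_stack_tracer counter.
 */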
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

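/* "stack_max_size" read handler: report the recorded maximum stack usage */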
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

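/*
 * "stack_max_size" write handler: reset (or preset) the recorded maximum.
 * The per-CPU disable_stack_tracer counter is bumped so that taking
 * stack_trace_max_lock here cannot recurse into the tracer itself.
 */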
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

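/*
 * seq_file iterator for the "stack_trace" file. Iteration runs with IRQs
 * off, the tracer disabled on this CPU and stack_trace_max_lock held, so
 * the snapshot cannot change while it is being printed.
 */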
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

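/*
 * With dynamic ftrace, "stack_trace_filter" lets the user restrict which
 * functions the stack tracer hooks, reusing the generic ftrace filter
 * machinery on trace_ops.
 */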
#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

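/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */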
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

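/*
 * Boot-time setup. Because __setup() matches on the "stacktrace" prefix,
 * this handler sees both "stacktrace" (str is empty) and
 * "stacktrace_filter=..." (str starts with "_filter="); the filter string
 * is stashed until ftrace is ready in stack_trace_init().
 */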
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

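/*
 * Create the tracefs control files and, if the tracer was enabled on the
 * command line, apply any early filter and register the ftrace callback.
 */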
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);