Commit | Line | Data |
---|---|---|
21b32bbf | 1 | /* |
21b32bbf IM |
2 | * Stack trace management functions |
3 | * | |
8f47e163 | 4 | * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
21b32bbf IM |
5 | */ |
6 | #include <linux/sched.h> | |
b17b0153 | 7 | #include <linux/sched/debug.h> |
68db0cf1 | 8 | #include <linux/sched/task_stack.h> |
21b32bbf | 9 | #include <linux/stacktrace.h> |
186f4360 | 10 | #include <linux/export.h> |
02b67518 | 11 | #include <linux/uaccess.h> |
c0b766f1 | 12 | #include <asm/stacktrace.h> |
49a612c6 | 13 | #include <asm/unwind.h> |
21b32bbf | 14 | |
49a612c6 JP |
15 | static int save_stack_address(struct stack_trace *trace, unsigned long addr, |
16 | bool nosched) | |
21b32bbf | 17 | { |
018378c5 | 18 | if (nosched && in_sched_functions(addr)) |
568b329a | 19 | return 0; |
49a612c6 | 20 | |
c0b766f1 AK |
21 | if (trace->skip > 0) { |
22 | trace->skip--; | |
568b329a | 23 | return 0; |
21b32bbf | 24 | } |
21b32bbf | 25 | |
49a612c6 JP |
26 | if (trace->nr_entries >= trace->max_entries) |
27 | return -1; | |
28 | ||
29 | trace->entries[trace->nr_entries++] = addr; | |
30 | return 0; | |
018378c5 ON |
31 | } |
32 | ||
77072f09 | 33 | static void noinline __save_stack_trace(struct stack_trace *trace, |
49a612c6 JP |
34 | struct task_struct *task, struct pt_regs *regs, |
35 | bool nosched) | |
9745512c | 36 | { |
49a612c6 JP |
37 | struct unwind_state state; |
38 | unsigned long addr; | |
9745512c | 39 | |
49a612c6 JP |
40 | if (regs) |
41 | save_stack_address(trace, regs->ip, nosched); | |
21b32bbf | 42 | |
49a612c6 JP |
43 | for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); |
44 | unwind_next_frame(&state)) { | |
45 | addr = unwind_get_return_address(&state); | |
46 | if (!addr || save_stack_address(trace, addr, nosched)) | |
47 | break; | |
48 | } | |
49 | ||
50 | if (trace->nr_entries < trace->max_entries) | |
51 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
52 | } | |
9745512c | 53 | |
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	/*
	 * Skip one extra entry so the stacktrace machinery's own frame is
	 * not recorded (__save_stack_trace is noinline, so it contributes
	 * a frame of its own to the walk).
	 */
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
9745512c | 63 | |
/*
 * Like save_stack_trace(), but walk from the context described by @regs
 * (e.g. an exception/interrupt frame); regs->ip is recorded first.
 * No skip adjustment is needed here since the walk is anchored at @regs,
 * not at this function's own frame.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}
68 | ||
/*
 * Save a backtrace of @tsk into @trace, leaving out scheduler-internal
 * functions (nosched=true filters via in_sched_functions()).
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/*
	 * Pin the task's stack so it cannot be freed out from under the
	 * walk; bail out if the task has no stack (already exited).
	 */
	if (!try_get_task_stack(tsk))
		return;

	/* Only when tracing ourselves is our own frame on the stack. */
	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
02b67518 | 81 | |
af085d90 JP |
82 | #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE |
83 | ||
/*
 * Walk @task's stack into @trace, returning -EINVAL as soon as anything
 * suggests the unwind could be incomplete or wrong; returns 0 only when
 * every frame was walked and recorded.
 *
 * __always_inline presumably keeps this from adding a stack frame of its
 * own to the walk — TODO confirm against the unwinder's skip handling.
 */
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Success path for user tasks: reaching a user-mode
			 * register frame means the entire kernel stack was
			 * walked.
			 */
			if (user_mode(regs))
				goto success;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */

			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		/* A full entry array also counts as failure here. */
		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/*
	 * Success path for non-user tasks, i.e. kthreads and idle tasks:
	 * only they may legitimately reach the end of the stack without
	 * hitting a user-mode register frame.
	 */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

success:
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}
141 | ||
/*
 * This function returns an error (-EINVAL) if it detects any unreliable
 * features of the stack.  Otherwise (return 0) it guarantees that the
 * stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is
 * inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	/* Stack is pinned for the duration of the walk. */
	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
166 | #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ | |
167 | ||
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * One userspace stack frame as walked by __save_stack_trace_user():
 * a saved frame pointer followed by the return address.
 */
struct stack_frame_user {
	const void __user *next_fp;	/* caller's saved frame pointer */
	unsigned long ret_addr;		/* return address into the caller */
};
174 | ||
c9cf4dbb FW |
175 | static int |
176 | copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) | |
02b67518 TE |
177 | { |
178 | int ret; | |
179 | ||
96d4f267 | 180 | if (!access_ok(fp, sizeof(*frame))) |
02b67518 TE |
181 | return 0; |
182 | ||
183 | ret = 1; | |
184 | pagefault_disable(); | |
185 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | |
186 | ret = 0; | |
187 | pagefault_enable(); | |
188 | ||
189 | return ret; | |
190 | } | |
191 | ||
/*
 * Walk the current task's *user* stack by following the frame-pointer
 * chain starting at the saved user bp, recording return addresses into
 * @trace until the buffer fills or the chain ends/looks bogus.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	/* The interrupted user instruction is the first entry. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		/* Stop if the frame can't be read (unmapped, faulted). */
		if (!copy_stack_frame(fp, &frame))
			break;
		/* A frame pointer below the stack pointer is bogus. */
		if ((unsigned long)fp < regs->sp)
			break;
		/* Only record frames with a non-zero return address. */
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		/* A self-referencing frame pointer would loop forever. */
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
218 | ||
02b67518 TE |
219 | void save_stack_trace_user(struct stack_trace *trace) |
220 | { | |
221 | /* | |
222 | * Trace user stack if we are not a kernel thread | |
223 | */ | |
224 | if (current->mm) { | |
8d7c6a96 | 225 | __save_stack_trace_user(trace); |
02b67518 TE |
226 | } |
227 | if (trace->nr_entries < trace->max_entries) | |
228 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
229 | } |