arch/s390/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

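/*
 * Walk the kernel stack of @task, or the stack described by @regs, and
 * hand each return address to @consume_entry until it returns false or
 * the unwinder runs out of frames.
 */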
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
{
        struct unwind_state state;
        unsigned long addr;

        unwind_for_each_frame(&state, task, regs, 0) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
        }
}

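/*
 * Reliable stack walk: succeed only if every frame lies on the task stack,
 * no pt_regs are found on the stack, every return address is valid, and the
 * unwinder does not report an error. Returns 0 on success, -EINVAL if the
 * trace is not reliable or the consumer stops the walk early.
 */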
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
{
        struct unwind_state state;
        unsigned long addr;

        unwind_for_each_frame(&state, task, NULL, 0) {
                if (state.stack_info.type != STACK_TYPE_TASK)
                        return -EINVAL;

                if (state.regs)
                        return -EINVAL;

                addr = unwind_get_return_address(&state);
                if (!addr)
                        return -EINVAL;

#ifdef CONFIG_RETHOOK
                /*
                 * Mark stack traces that contain the rethook return
                 * trampoline as unreliable.
                 */
                if (state.ip == (unsigned long)arch_rethook_trampoline)
                        return -EINVAL;
#endif

                if (!consume_entry(cookie, addr))
                        return -EINVAL;
        }

        /* Check for stack corruption */
        if (unwind_error(&state))
                return -EINVAL;
        return 0;
}

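/*
 * Store one instruction pointer either in the perf callchain entry (perf
 * path) or via the generic consume_entry callback. Returns false when the
 * consumer cannot take any more entries and the walk should stop.
 */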
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
                            struct perf_callchain_entry_ctx *entry, bool perf,
                            unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
        if (perf) {
                if (perf_callchain_store(entry, ip))
                        return false;
                return true;
        }
#endif
        return consume_entry(cookie, ip);
}

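/*
 * Filter obviously bogus user space instruction addresses: odd addresses
 * (s390 instructions are halfword aligned), addresses below mmap_min_addr,
 * and addresses beyond the current task's address space limit (asce_limit).
 */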
static inline bool ip_invalid(unsigned long ip)
{
        /*
         * Perform some basic checks to determine whether an instruction
         * address taken from an unreliable source is invalid.
         */
        if (ip & 1)
                return true;
        if (ip < mmap_min_addr)
                return true;
        if (ip >= current->mm->context.asce_limit)
                return true;
        return false;
}

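/* Check whether @ip lies within the vdso text mapping of the current task. */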
static inline bool ip_within_vdso(unsigned long ip)
{
        return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

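/*
 * Common user space stack walker shared by the stacktrace and perf callchain
 * code. Starting at the user stack pointer in @regs, follow the back chain
 * of user space stack frames, with special handling for the non-standard
 * vdso wrapper frames, and stop at the first fault, misaligned stack
 * pointer, or invalid return address.
 */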
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
                                 struct perf_callchain_entry_ctx *entry,
                                 const struct pt_regs *regs, bool perf)
{
        struct stack_frame_vdso_wrapper __user *sf_vdso;
        struct stack_frame_user __user *sf;
        unsigned long ip, sp;
        bool first = true;

        if (is_compat_task())
                return;
        if (!current->mm)
                return;
        ip = instruction_pointer(regs);
        if (!store_ip(consume_entry, cookie, entry, perf, ip))
                return;
        sf = (void __user *)user_stack_pointer(regs);
        pagefault_disable();
        while (1) {
                if (__get_user(sp, &sf->back_chain))
                        break;
                /*
                 * VDSO entry code has a non-standard stack frame layout.
                 * See VDSO user wrapper code for details.
                 */
                if (!sp && ip_within_vdso(ip)) {
                        sf_vdso = (void __user *)sf;
                        if (__get_user(ip, &sf_vdso->return_address))
                                break;
                        sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
                        sf = (void __user *)sp;
                        if (__get_user(sp, &sf->back_chain))
                                break;
                } else {
                        sf = (void __user *)sp;
                        if (__get_user(ip, &sf->gprs[8]))
                                break;
                }
                /* Sanity check: ABI requires SP to be 8 byte aligned. */
                if (sp & 0x7)
                        break;
                if (ip_invalid(ip)) {
                        /*
                         * If the instruction address is invalid, and this
                         * is the first stack frame, assume r14 has not
                         * been written to the stack yet. Otherwise exit.
                         */
                        if (!first)
                                break;
                        ip = regs->gprs[14];
                        if (ip_invalid(ip))
                                break;
                }
                if (!store_ip(consume_entry, cookie, entry, perf, ip))
                        break;
                first = false;
        }
        pagefault_enable();
}

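/*
 * Entry point for user space backtraces via the generic stacktrace code;
 * reuses the common walker without a perf callchain entry.
 */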
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs)
{
        arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}