// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

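/*
 * Back end of the generic stack trace interface: walk the kernel stack
 * of @task (or the stack described by @regs) with the s390 unwinder and
 * hand each return address to @consume_entry until it returns false or
 * no frames are left. Reached via stack_trace_save() and its relatives.
 */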
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

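/*
 * Reliable stack walk, as required for live patching: consume all
 * return addresses of the given task, but report -EINVAL as soon as
 * anything looks suspicious - a frame outside the task stack, an
 * interrupted context, an unresolvable address, or an unwinder error.
 */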
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

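/*
 * Store one instruction pointer either in a perf callchain entry or
 * via the generic consume_entry callback, depending on the caller.
 * Returns false if the consumer wants the stack walk to stop.
 */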
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

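/*
 * Filter out garbage return addresses read from the (untrusted) user
 * space stack before they are reported.
 */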
static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine whether an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)	/* instruction addresses are even (halfword aligned) */
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

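/*
 * Return true if @ip points into the vdso text mapping of the current
 * process.
 */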
static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

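/*
 * Common user space stack walker, shared by perf callchain sampling and
 * the generic stack trace interface. It follows the ABI back chain:
 * each stack frame stores a pointer to the previous frame at offset 0
 * and a saved return address (r14) in its register save area. Page
 * faults are disabled during the walk, so touching an unmapped stack
 * page simply ends the trace.
 */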
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}

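/*
 * Generic stack trace entry point for user space: the common walk
 * above, without a perf callchain entry.
 */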
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}