// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
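
/*
 * Walk the kernel stack of @task, or the stack given by @regs if that is
 * non-NULL, and feed every return address to @consume_entry until the
 * unwinder runs out of frames or the callback asks to stop.
 */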
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
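
/*
 * Usage sketch (illustrative only, not part of this file): roughly how
 * the generic stacktrace code drives arch_stack_walk() through a
 * consume_entry callback. All example_* names are made up; compare
 * kernel/stacktrace.c for the real thing.
 */
#if 0
struct example_cookie {
	unsigned long	*store;
	unsigned int	size;
	unsigned int	len;
};

static bool example_consume_entry(void *cookie, unsigned long addr)
{
	struct example_cookie *c = cookie;

	if (c->len >= c->size)
		return false;	/* buffer full: stop the walk */
	c->store[c->len++] = addr;
	return true;		/* keep unwinding */
}

static unsigned int example_save(unsigned long *buf, unsigned int size)
{
	struct example_cookie c = { .store = buf, .size = size };

	arch_stack_walk(example_consume_entry, &c, current, NULL);
	return c.len;
}
#endif

/*
 * As arch_stack_walk(), but bail out with -EINVAL as soon as anything
 * looks unreliable: frames outside the task stack, saved register sets
 * on the stack, unresolvable return addresses, rethook trampolines, or
 * an unwinder error.
 */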
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;
		if (state.regs)
			return -EINVAL;
		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;
#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif
		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}
	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}
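
/*
 * Store one instruction pointer, either into the perf callchain @entry
 * (when @perf is true) or via the generic @consume_entry callback.
 * Returns false when the caller should stop walking the stack.
 */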
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}
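
/*
 * Why these checks: s390 instructions are halfword aligned, so odd
 * addresses are never valid, and addresses below mmap_min_addr or
 * beyond the address space limit (asce_limit) cannot be user code.
 */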
static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine if an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}
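
/* Check whether an address lies within the user's vDSO text mapping. */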
static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}
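
/*
 * Walk a user space stack by following the s390 ABI back chain: each
 * frame stores a pointer to the previous frame in back_chain and the
 * caller's return address in the %r14 save slot (gprs[8]). The vDSO
 * user wrapper uses a non-standard frame layout and is handled
 * separately. With @perf set, entries go to perf_callchain_store()
 * instead of @consume_entry.
 */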
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (!sp || sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}
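
/*
 * Generic user space stack walk entry point: the common walk above,
 * without perf callchain handling.
 */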
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}