s390/stacktrace: Detect vdso stack frames
author Heiko Carstens <hca@linux.ibm.com>
Mon, 29 Apr 2024 12:28:48 +0000 (14:28 +0200)
committer Alexander Gordeev <agordeev@linux.ibm.com>
Tue, 14 May 2024 11:37:07 +0000 (13:37 +0200)
Clear the back chain of the extra stack frame added by the vdso user wrapper
code. This allows the user stack walker to detect and skip the non-standard
stack frame. Without this, an incorrect instruction pointer would be added
to stack traces, and stack frame walking would continue with a more or less
random back chain.
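
For reference, a minimal sketch of the two frame layouts involved, modeled
on arch/s390/include/asm/stacktrace.h; treat the exact field layout here as
illustrative rather than authoritative:

  struct stack_frame_user {
          unsigned long back_chain;     /* cleared by the wrapper after this patch */
          unsigned long empty1[5];
          unsigned long gprs[10];       /* gprs[8]: caller's return address (r14) */
          unsigned long empty2[4];
  };

  struct stack_frame_vdso_wrapper {
          struct stack_frame_user sf;
          unsigned long return_address; /* extra slot the wrapper saves r14 into */
  };

A zero back chain would normally terminate the walk; combined with an
instruction pointer inside the vdso text it now identifies the wrapper
frame instead.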

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/include/asm/processor.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso64/vdso_user_wrapper.S

diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e8cd02fe36bee8a45690aaa9e10d3b7b..bbbdc5abe2b2ce618a66f8a6a7114d8ff404c50a 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
 
 extern const struct seq_operations cpuinfo_op;
 extern void execve_tail(void);
+unsigned long vdso_text_size(void);
 unsigned long vdso_size(void);
 
 /*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 28017c418442be9e47d091ba53eda128eafbf4ff..2f65bca2f3f1c84f23df2f0d7dd9fc2f34c54c21 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,6 +66,7 @@ int main(void)
        OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
        DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
        BLANK();
+       OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
        DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
        OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
        DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
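
A note on the mechanics, since only the generated constant shows up in the
assembly below: asm-offsets.c is never linked into the kernel; it exists so
the build can harvest structure offsets into the generated asm-offsets.h.
A sketch of the two macros used above, modeled on include/linux/kbuild.h:

  /* The compiler embeds these markers in its assembly output; a build
   * script turns them into #define lines in asm-offsets.h. */
  #define DEFINE(sym, val) \
          asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
  #define OFFSET(sym, str, mem) \
          DEFINE(sym, offsetof(struct str, mem))

Since back_chain is the first member of struct stack_frame_user, the new
line effectively yields #define __SFUSER_BACKCHAIN 0 for use in
vdso_user_wrapper.S.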
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b4485b0c7f06bad282489ec0760c3edaa0410730..640363b2a1059ea96c594fbee417b29a25ae679b 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -92,10 +92,16 @@ static inline bool ip_invalid(unsigned long ip)
        return false;
 }
 
+static inline bool ip_within_vdso(unsigned long ip)
+{
+       return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
 void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
                                 struct perf_callchain_entry_ctx *entry,
                                 const struct pt_regs *regs, bool perf)
 {
+       struct stack_frame_vdso_wrapper __user *sf_vdso;
        struct stack_frame_user __user *sf;
        unsigned long ip, sp;
        bool first = true;
@@ -112,11 +118,25 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
        while (1) {
                if (__get_user(sp, &sf->back_chain))
                        break;
+               /*
+                * VDSO entry code has a non-standard stack frame layout.
+                * See VDSO user wrapper code for details.
+                */
+               if (!sp && ip_within_vdso(ip)) {
+                       sf_vdso = (void __user *)sf;
+                       if (__get_user(ip, &sf_vdso->return_address))
+                               break;
+                       sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+                       sf = (void __user *)sp;
+                       if (__get_user(sp, &sf->back_chain))
+                               break;
+               } else {
+                       sf = (void __user *)sp;
+                       if (__get_user(ip, &sf->gprs[8]))
+                               break;
+               }
                /* Sanity check: ABI requires SP to be 8 byte aligned. */
-               if (!sp || sp & 0x7)
-                       break;
-               sf = (void __user *)sp;
-               if (__get_user(ip, &sf->gprs[8]))
+               if (sp & 0x7)
                        break;
                if (ip_invalid(ip)) {
                        /*
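
Taken in isolation, the new detection can be modeled by the standalone
sketch below (not kernel code; in_range() from include/linux/minmax.h
tests start <= addr < start + len, which the unsigned subtraction here
reproduces):

  #include <stdbool.h>

  /*
   * Standalone model of the new check: a zero back chain normally ends
   * the walk, but if the current IP lies inside the vdso text it marks
   * the wrapper's non-standard frame, whose extra slot carries the
   * caller's return address.
   */
  static bool is_vdso_wrapper_frame(unsigned long back_chain, unsigned long ip,
                                    unsigned long vdso_base, unsigned long vdso_text_size)
  {
          /* unsigned wrap-around makes this false for ip < vdso_base */
          return back_chain == 0 && ip - vdso_base < vdso_text_size;
  }
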
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c91db0f46a9518c47dd9356cf74907b71..2f967ac2b8e3e4491e96d0427a8b21d4413ae367 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
        return addr;
 }
 
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
 {
-       unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+       unsigned long size;
 
        if (is_compat_task())
-               size += vdso32_end - vdso32_start;
+               size = vdso32_end - vdso32_start;
        else
-               size += vdso64_end - vdso64_start;
+               size = vdso64_end - vdso64_start;
        return PAGE_ALIGN(size);
 }
 
+unsigned long vdso_size(void)
+{
+       return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        unsigned long addr = VDSO_BASE;
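
A worked example of the split (page size, VVAR_NR_PAGES, and the raw text
size below are assumptions for illustration only):

  #include <stdio.h>

  #define PAGE_SIZE      4096UL
  #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
  #define VVAR_NR_PAGES  2UL

  int main(void)
  {
          unsigned long text = PAGE_ALIGN(37000UL); /* vdso_text_size() */

          printf("text  %lu\n", text);                             /* 40960 */
          printf("total %lu\n", text + VVAR_NR_PAGES * PAGE_SIZE); /* 49152 */
          return 0;
  }

The point of the refactor: ip_within_vdso() must compare against the mapped
text only, while the mapping code still sizes text plus vvar pages.
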
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index deee8ca9cdbf04eebfceaff8740213f9ebeeefa1..e26e68675c08d690c23237f482a1046ab77b987e 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -23,6 +23,7 @@ __kernel_\func:
        CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
        stg     %r14,__SFVDSO_RETURN_ADDRESS(%r15)
        CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+       xc      __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
        brasl   %r14,__s390_vdso_\func
        lg      %r14,__SFVDSO_RETURN_ADDRESS(%r15)
        CFI_RESTORE 14
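
The xc (exclusive-or character) instruction with identical operands is the
classic s390 idiom for zeroing storage in place, since x XOR x == 0; here
it clears the 8-byte back chain slot of the wrapper's own frame before the
real vdso function is called. A hedged C analog of the guarantee the
wrapper now provides (clear_back_chain() is a hypothetical helper):

  /* Illustrative analog only, not kernel code. */
  static inline void clear_back_chain(unsigned long *frame)
  {
          frame[0] = 0; /* back_chain is the first slot: __SFUSER_BACKCHAIN == 0 */
  }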