s390: avoid misusing CALL_ON_STACK for task stack setup
authorVasily Gorbik <gor@linux.ibm.com>
Fri, 22 Nov 2019 12:12:57 +0000 (13:12 +0100)
committerVasily Gorbik <gor@linux.ibm.com>
Sat, 30 Nov 2019 09:52:45 +0000 (10:52 +0100)
CALL_ON_STACK is intended to be used for temporary stack switching with
potential return to the caller.

When CALL_ON_STACK is misused to switch from the nodat stack to the task
stack, the back_chain information would later lead the stack unwinder from
the task stack into the (per cpu) nodat stack, which is reused for other
purposes. This would yield confusing unwinding results or errors.

To avoid that, introduce CALL_ON_STACK_NORETURN to be used instead. It
makes sure that back_chain is zeroed and that the unwinder finishes
gracefully, ending up at the task pt_regs.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/stacktrace.h
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c

index bb854e33e46098e017601814291cf05d8d731c86..4f3dd1c86c0d0b167614aadd7a3c080e92cb2203 100644 (file)
@@ -124,4 +124,15 @@ struct stack_frame {
        r2;                                                             \
 })
 
+#define CALL_ON_STACK_NORETURN(fn, stack)                              \
+({                                                                     \
+       asm volatile(                                                   \
+               "       la      15,0(%[_stack])\n"                      \
+               "       xc      %[_bc](8,15),%[_bc](15)\n"              \
+               "       brasl   14,%[_fn]\n"                            \
+               ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+                 [_stack] "a" (stack), [_fn] "X" (fn));                \
+       BUG();                                                          \
+})
+
 #endif /* _ASM_S390_STACKTRACE_H */
index 3ff291bc63b7e596767aed95607a9138bdab95d4..9cbf490fd162ec6b8df30a8cc0a9292cdbe90acb 100644 (file)
@@ -355,7 +355,6 @@ early_initcall(async_stack_realloc);
 
 void __init arch_call_rest_init(void)
 {
-       struct stack_frame *frame;
        unsigned long stack;
 
        stack = stack_alloc();
@@ -368,13 +367,7 @@ void __init arch_call_rest_init(void)
        set_task_stack_end_magic(current);
        stack += STACK_INIT_OFFSET;
        S390_lowcore.kernel_stack = stack;
-       frame = (struct stack_frame *) stack;
-       memset(frame, 0, sizeof(*frame));
-       /* Branch to rest_init on the new stack, never returns */
-       asm volatile(
-               "       la      15,0(%[_frame])\n"
-               "       jg      rest_init\n"
-               : : [_frame] "a" (frame));
+       CALL_ON_STACK_NORETURN(rest_init, stack);
 }
 
 static void __init setup_lowcore_dat_off(void)
index 06dddd7c4290c4ba1e231ca348ac86790515d0e8..2794cad9312e37cda034425386edf7356f2e6d97 100644 (file)
@@ -876,7 +876,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
        S390_lowcore.restart_source = -1UL;
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-       CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+       CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
 }
 
 /* Upping and downing of CPUs */