powerpc: regain entire stack space
authorChristophe Leroy <christophe.leroy@c-s.fr>
Thu, 31 Jan 2019 10:09:00 +0000 (10:09 +0000)
committerMichael Ellerman <mpe@ellerman.id.au>
Sat, 23 Feb 2019 11:31:40 +0000 (22:31 +1100)
thread_info is no longer kept in the stack, so the entire stack
can now be used.

There is also no longer any risk of corrupting task_cpu(p) with a
stack overflow, so the patch removes the test.

When doing this, an explicit test for a NULL stack pointer is
needed in validate_sp(), as it is no longer implicitly covered
by the sizeof(thread_info) gap.

In the meantime, with the previous patch, all pointers to the stacks
are no longer pointers to thread_info, so this patch changes them
to void*.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/processor.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup_64.c

index 28a7ace0a1b91d61784576a496284a57169d511d..c91a60cda4faa9c162f5ea2bd519b11ad64dc40c 100644 (file)
@@ -48,16 +48,16 @@ struct pt_regs;
  * Per-cpu stacks for handling critical, debug and machine check
  * level interrupts.
  */
-extern struct thread_info *critirq_ctx[NR_CPUS];
-extern struct thread_info *dbgirq_ctx[NR_CPUS];
-extern struct thread_info *mcheckirq_ctx[NR_CPUS];
+extern void *critirq_ctx[NR_CPUS];
+extern void *dbgirq_ctx[NR_CPUS];
+extern void *mcheckirq_ctx[NR_CPUS];
 #endif
 
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
  */
-extern struct thread_info *hardirq_ctx[NR_CPUS];
-extern struct thread_info *softirq_ctx[NR_CPUS];
+extern void *hardirq_ctx[NR_CPUS];
+extern void *softirq_ctx[NR_CPUS];
 
 void call_do_softirq(void *sp);
 void call_do_irq(struct pt_regs *regs, void *sp);
index 2c740042b8d3fa1191ceb2f85c5ca1c81cc161df..3351bcf42f2dbea4d747518205fd8a3fce214766 100644 (file)
@@ -270,8 +270,7 @@ struct thread_struct {
 #define ARCH_MIN_TASKALIGN 16
 
 #define INIT_SP                (sizeof(init_stack) + (unsigned long) &init_stack)
-#define INIT_SP_LIMIT \
-       (_ALIGN_UP(sizeof(struct thread_info), 16) + (unsigned long)&init_stack)
+#define INIT_SP_LIMIT  ((unsigned long)&init_stack)
 
 #ifdef CONFIG_SPE
 #define SPEFSCR_INIT \
index ca3fb836cbb93f5e89e1506ae79bdab8a2e13103..1ad0cbcc5f134a03a3f1b7a3086ab598853cdd02 100644 (file)
@@ -92,7 +92,6 @@ int main(void)
        DEFINE(SIGSEGV, SIGSEGV);
        DEFINE(NMI_MASK, NMI_MASK);
 #else
-       DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
        OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
 #ifdef CONFIG_PPC_RTAS
        OFFSET(RTAS_SP, thread_struct, rtas_sp);
index f3618353c1c4c69efd783e4bde207333ac4e56e9..424e7265e7908fcfb2f42639ab24908829c5b0bb 100644 (file)
@@ -97,14 +97,11 @@ crit_transfer_to_handler:
        mfspr   r0,SPRN_SRR1
        stw     r0,_SRR1(r11)
 
-       /* set the stack limit to the current stack
-        * and set the limit to protect the thread_info
-        * struct
-        */
+       /* set the stack limit to the current stack */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,SAVED_KSP_LIMIT(r11)
-       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
@@ -121,14 +118,11 @@ crit_transfer_to_handler:
        mfspr   r0,SPRN_SRR1
        stw     r0,crit_srr1@l(0)
 
-       /* set the stack limit to the current stack
-        * and set the limit to protect the thread_info
-        * struct
-        */
+       /* set the stack limit to the current stack */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,saved_ksp_limit@l(0)
-       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
index 85c48911938a7cba9ad23405948f80fcae943f8a..938944c6e2ee1693b87add70c6fe5ad0b81dba0d 100644 (file)
@@ -618,9 +618,8 @@ static inline void check_stack_overflow(void)
        sp = current_stack_pointer() & (THREAD_SIZE-1);
 
        /* check for stack overflow: is there less than 2KB free? */
-       if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-               pr_err("do_IRQ: stack overflow: %ld\n",
-                       sp - sizeof(struct thread_info));
+       if (unlikely(sp < 2048)) {
+               pr_err("do_IRQ: stack overflow: %ld\n", sp);
                dump_stack();
        }
 #endif
@@ -660,7 +659,7 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
-       struct thread_info *curtp, *irqtp, *sirqtp;
+       void *curtp, *irqtp, *sirqtp;
 
        /* Switch to the irq stack to handle this */
        curtp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
@@ -686,17 +685,17 @@ void __init init_IRQ(void)
 }
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
+void   *critirq_ctx[NR_CPUS] __read_mostly;
+void    *dbgirq_ctx[NR_CPUS] __read_mostly;
+void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
-struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
+void *softirq_ctx[NR_CPUS] __read_mostly;
+void *hardirq_ctx[NR_CPUS] __read_mostly;
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *irqtp;
+       void *irqtp;
 
        irqtp = softirq_ctx[smp_processor_id()];
        call_do_softirq(irqtp);
index b37b50fde828642b95359d4fdf4f04dc94210fa9..6f6127c3760cb08a5df0541c7ad11d07b4f0b370 100644 (file)
@@ -46,11 +46,10 @@ _GLOBAL(call_do_softirq)
        mflr    r0
        stw     r0,4(r1)
        lwz     r10,THREAD+KSP_LIMIT(r2)
-       addi    r11,r3,THREAD_INFO_GAP
+       stw     r3, THREAD+KSP_LIMIT(r2)
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3
        stw     r10,8(r1)
-       stw     r11,THREAD+KSP_LIMIT(r2)
        bl      __do_softirq
        lwz     r10,8(r1)
        lwz     r1,0(r1)
@@ -66,11 +65,10 @@ _GLOBAL(call_do_irq)
        mflr    r0
        stw     r0,4(r1)
        lwz     r10,THREAD+KSP_LIMIT(r2)
-       addi    r11,r4,THREAD_INFO_GAP
+       stw     r4, THREAD+KSP_LIMIT(r2)
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
        mr      r1,r4
        stw     r10,8(r1)
-       stw     r11,THREAD+KSP_LIMIT(r2)
        bl      __do_irq
        lwz     r10,8(r1)
        lwz     r1,0(r1)
index fd07711035bdd635f82ff75c6a26525cadf82924..dd9e0d5386ee7030fe4539ef67e0b3a4d6e3eb46 100644 (file)
@@ -1691,8 +1691,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
 #ifdef CONFIG_PPC32
-       p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
-                               _ALIGN_UP(sizeof(struct thread_info), 16);
+       p->thread.ksp_limit = (unsigned long)end_of_stack(p);
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
@@ -1995,21 +1994,14 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);
 
-       /*
-        * Avoid crashing if the stack has overflowed and corrupted
-        * task_cpu(p), which is in the thread_info struct.
-        */
-       if (cpu < NR_CPUS && cpu_possible(cpu)) {
-               stack_page = (unsigned long) hardirq_ctx[cpu];
-               if (sp >= stack_page + sizeof(struct thread_struct)
-                   && sp <= stack_page + THREAD_SIZE - nbytes)
-                       return 1;
-
-               stack_page = (unsigned long) softirq_ctx[cpu];
-               if (sp >= stack_page + sizeof(struct thread_struct)
-                   && sp <= stack_page + THREAD_SIZE - nbytes)
-                       return 1;
-       }
+       stack_page = (unsigned long)hardirq_ctx[cpu];
+       if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+               return 1;
+
+       stack_page = (unsigned long)softirq_ctx[cpu];
+       if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+               return 1;
+
        return 0;
 }
 
@@ -2018,8 +2010,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
 {
        unsigned long stack_page = (unsigned long)task_stack_page(p);
 
-       if (sp >= stack_page + sizeof(struct thread_struct)
-           && sp <= stack_page + THREAD_SIZE - nbytes)
+       if (sp < THREAD_SIZE)
+               return 0;
+
+       if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;
 
        return valid_irq_stack(sp, p, nbytes);
index 0912948a8ea6895658b33af59a51dcddcbc97a18..2db1c5f7d141884b90324c8149482e8a3e69e10c 100644 (file)
@@ -716,19 +716,19 @@ void __init emergency_stack_init(void)
        limit = min(ppc64_bolted_size(), ppc64_rma_size);
 
        for_each_possible_cpu(i) {
-               struct thread_info *ti;
+               void *ti;
 
                ti = alloc_stack(limit, i);
-               paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
+               paca_ptrs[i]->emergency_sp = ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                ti = alloc_stack(limit, i);
-               paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
+               paca_ptrs[i]->nmi_emergency_sp = ti + THREAD_SIZE;
 
                /* emergency stack for machine check exception handling. */
                ti = alloc_stack(limit, i);
-               paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
+               paca_ptrs[i]->mc_emergency_sp = ti + THREAD_SIZE;
 #endif
        }
 }