powerpc: Activate CONFIG_THREAD_INFO_IN_TASK
author     Christophe Leroy <christophe.leroy@c-s.fr>
           Thu, 31 Jan 2019 10:08:58 +0000 (10:08 +0000)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Sat, 23 Feb 2019 11:31:40 +0000 (22:31 +1100)
This patch activates CONFIG_THREAD_INFO_IN_TASK, which moves
thread_info into task_struct.

Moving thread_info into task_struct has the following advantages:
  - It protects thread_info from corruption in the case of stack
    overflows.
  - Its address is harder to determine if stack addresses are leaked,
    making a number of attacks more difficult.

This has the following consequences:
  - thread_info is now located at the beginning of task_struct.
  - The 'cpu' field is now in task_struct, and only exists when
    CONFIG_SMP is active.
  - thread_info no longer has the 'task' field.

This patch:
  - Removes all copying of the thread_info struct when the stack changes.
  - Changes the CURRENT_THREAD_INFO() macro to point to current.
  - Selects CONFIG_THREAD_INFO_IN_TASK.
  - Modifies raw_smp_processor_id() to get ->cpu from current, without
    including linux/sched.h (to avoid a circular inclusion) and without
    including asm/asm-offsets.h (to avoid duplicating symbol names
    between ASM constants and C constants); see the sketch after this
    list.
  - Modifies klp_init_thread_info() to take a task_struct pointer
    argument.
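
A minimal sketch of what the two central accessors reduce to after this
change (illustrative only: current_thread_info() is the generic kernel
definition used with THREAD_INFO_IN_TASK, and _TASK_CPU is the constant
generated by the Makefile hunk below; neither line is literal powerpc
file content):

    /* thread_info is the first member of task_struct, so the generic
     * header can resolve current_thread_info() to a cast of current. */
    #define current_thread_info()  ((struct thread_info *)current)

    /* raw_smp_processor_id() only needs the byte offset of
     * task_struct.cpu, exported by kbuild as _TASK_CPU, so asm/smp.h
     * never has to pull in the full definition of task_struct. */
    #define raw_smp_processor_id() \
            (*(unsigned int *)((void *)current + _TASK_CPU))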

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Add task_stack.h to livepatch.h to fix build fails]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
21 files changed:
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/livepatch.h
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_booke.h
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/net/bpf_jit32.h

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5b7945a7bd4171a060b37a8d4e21032a683a9428..652c25260838a594f998ab2571cb685ffe72b2aa 100644
@@ -238,6 +238,7 @@ config PPC
        select RTC_LIB
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
+       select THREAD_INFO_IN_TASK
        select VIRT_TO_BUS                      if !PPC64
        #
        # Please keep this list sorted alphabetically.
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index ac033341ed55e62f8c2bd571901201f86cfe736c..53ffe935f3b0a594eda9e75c8d959d1cd50b86c0 100644
@@ -427,6 +427,13 @@ else
 endif
 endif
 
+ifdef CONFIG_SMP
+prepare: task_cpu_prepare
+
+task_cpu_prepare: prepare0
+       $(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TI_CPU") print $$3;}' include/generated/asm-offsets.h))
+endif
+
 # Check toolchain versions:
 # - gcc-4.6 is the minimum kernel-wide version so nothing required.
 checkbin:
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 2efbae8d93be0b01158ca05bb644a63c1fc295a3..28a7ace0a1b91d61784576a496284a57169d511d 100644
@@ -51,9 +51,6 @@ struct pt_regs;
 extern struct thread_info *critirq_ctx[NR_CPUS];
 extern struct thread_info *dbgirq_ctx[NR_CPUS];
 extern struct thread_info *mcheckirq_ctx[NR_CPUS];
-extern void exc_lvl_ctx_init(void);
-#else
-#define exc_lvl_ctx_init()
 #endif
 
 /*
@@ -62,7 +59,6 @@ extern void exc_lvl_ctx_init(void);
 extern struct thread_info *hardirq_ctx[NR_CPUS];
 extern struct thread_info *softirq_ctx[NR_CPUS];
 
-extern void irq_ctx_init(void);
 void call_do_softirq(void *sp);
 void call_do_irq(struct pt_regs *regs, void *sp);
 extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 47a03b9b528b46672ece81b641ebecc13247c817..5070df19d4638e60dc9ede36058f27027e3f9a23 100644
@@ -21,6 +21,7 @@
 
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/sched/task_stack.h>
 
 #ifdef CONFIG_LIVEPATCH
 static inline int klp_check_compiler_support(void)
@@ -43,13 +44,13 @@ static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
        return ftrace_location_range(faddr, faddr + 16);
 }
 
-static inline void klp_init_thread_info(struct thread_info *ti)
+static inline void klp_init_thread_info(struct task_struct *p)
 {
        /* + 1 to account for STACK_END_MAGIC */
-       ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+       task_thread_info(p)->livepatch_sp = end_of_stack(p) + 1;
 }
 #else
-static void klp_init_thread_info(struct thread_info *ti) { }
+static inline void klp_init_thread_info(struct task_struct *p) { }
 #endif /* CONFIG_LIVEPATCH */
 
 #endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 41695745032cd6625ec0660edea8a1cdf144840c..0de717e16dd6d06a1de9de594164e237ce690d11 100644
@@ -83,7 +83,22 @@ int is_cpu_dead(unsigned int cpu);
 /* 32-bit */
 extern int smp_hw_index[];
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+/*
+ * This is particularly ugly: it appears we can't actually get the definition
+ * of task_struct here, but we need access to the CPU this task is running on.
+ * Instead of using task_struct we're using _TASK_CPU which is extracted from
+ * asm-offsets.h by kbuild to get the current processor ID.
+ *
+ * This also needs to be safeguarded when building asm-offsets.s because at
+ * that time _TASK_CPU is not defined yet. It could have been guarded by
+ * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
+ * when building something else than asm-offsets.s
+ */
+#ifdef GENERATING_ASM_OFFSETS
+#define raw_smp_processor_id()         (0)
+#else
+#define raw_smp_processor_id()         (*(unsigned int *)((void *)current + _TASK_CPU))
+#endif
 #define hard_smp_processor_id()        (smp_hw_index[smp_processor_id()])
 
 static inline int get_hard_smp_processor_id(int cpu)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 544cac0474cbcca12b96ef9f19406ada562722a2..d91523c2c7d88cf24930fe1a31cc3c6191893312 100644
@@ -18,9 +18,9 @@
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(ld dest, PACACURRENT(r13))
 #else
-#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(mr dest, r2)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -34,8 +34,6 @@
  * low level task data.
  */
 struct thread_info {
-       struct task_struct *task;               /* main task structure */
-       int             cpu;                    /* cpu we're on */
        int             preempt_count;          /* 0 => preemptable,
                                                   <0 => BUG */
        unsigned long   local_flags;            /* private flags for thread */
@@ -58,8 +56,6 @@ struct thread_info {
  */
 #define INIT_THREAD_INFO(tsk)                  \
 {                                              \
-       .task =         &tsk,                   \
-       .cpu =          0,                      \
        .preempt_count = INIT_PREEMPT_COUNT,    \
        .flags =        0,                      \
 }
@@ -67,15 +63,6 @@ struct thread_info {
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-       unsigned long val;
-
-       asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
-
-       return (struct thread_info *)val;
-}
-
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ca55027f47a464689ad90cc269845053a57cbe04..ca3fb836cbb93f5e89e1506ae79bdab8a2e13103 100644
@@ -13,6 +13,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define GENERATING_ASM_OFFSETS /* asm/smp.h */
+
 #include <linux/compat.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -97,6 +99,9 @@ int main(void)
 #endif
 #endif /* CONFIG_PPC64 */
        OFFSET(TASK_STACK, task_struct, stack);
+#ifdef CONFIG_SMP
+       OFFSET(TI_CPU, task_struct, cpu);
+#endif
 
 #ifdef CONFIG_LIVEPATCH
        OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
@@ -164,8 +169,6 @@ int main(void)
        OFFSET(TI_FLAGS, thread_info, flags);
        OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
        OFFSET(TI_PREEMPT, thread_info, preempt_count);
-       OFFSET(TI_TASK, thread_info, task);
-       OFFSET(TI_CPU, thread_info, cpu);
 
 #ifdef CONFIG_PPC64
        OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 063100df83257b88ab721d89cff64c2116f1ef24..f3618353c1c4c69efd783e4bde207333ac4e56e9 100644
@@ -1165,10 +1165,6 @@ ret_from_debug_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
-       lwz     r9,TASK_STACK-THREAD(r9)
-       CURRENT_THREAD_INFO(r10, r1)
-       lwz     r10,TI_PREEMPT(r10)
-       stw     r10,TI_PREEMPT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_MMU_REGS;
@@ -1291,10 +1287,13 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
-       beq     4f
+       beq     5f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
+5:     mfspr   r2,SPRN_SPRG_THREAD
+       addi    r2,r2,-THREAD
+       tovirt(r2,r2)                   /* set back r2 to current */
 4:     addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unrecoverable_exception
        /* shouldn't return */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index afb638778f443316cf81d07ff2151d8af068ba97..20f14996281d2316955424adfd01648cde55aff8 100644
@@ -77,17 +77,6 @@ special_reg_save:
        andi.   r3,r3,MSR_PR
        bnelr
 
-       /* Copy info into temporary exception thread info */
-       ld      r11,PACAKSAVE(r13)
-       CURRENT_THREAD_INFO(r11, r11)
-       CURRENT_THREAD_INFO(r12, r1)
-       ld      r10,TI_FLAGS(r11)
-       std     r10,TI_FLAGS(r12)
-       ld      r10,TI_PREEMPT(r11)
-       std     r10,TI_PREEMPT(r12)
-       ld      r10,TI_TASK(r11)
-       std     r10,TI_TASK(r12)
-
        /*
         * Advance to the next TLB exception frame for handler
         * types that don't do it automatically.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 2112805ef1d1e70482d1eddd86d80d511a19e673..888fcff3f8cc11e7989cfb25477f3078f3594d4f 100644
@@ -834,9 +834,9 @@ __secondary_start:
        /* get current's stack and current */
        lis     r1,secondary_ti@ha
        tophys(r1,r1)
-       lwz     r1,secondary_ti@l(r1)
-       tophys(r2,r1)
-       lwz     r2,TI_TASK(r2)
+       lwz     r2,secondary_ti@l(r1)
+       tophys(r1,r2)
+       lwz     r1,TASK_STACK(r1)
 
        /* stack */
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 4e8c8bf504139fedce80ef0ebcc40f2843665a1c..f94a93b6c2f2f83ab22cb647d07cdc2ccb02c5a7 100644
@@ -1021,8 +1021,8 @@ _GLOBAL(start_secondary_47x)
 
        /* Get current's stack and current */
        lis     r1,secondary_ti@ha
-       lwz     r1,secondary_ti@l(r1)
-       lwz     r2,TI_TASK(r1)
+       lwz     r2,secondary_ti@l(r1)
+       lwz     r1,TASK_STACK(r2)
 
        /* Current stack pointer */
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 69e80e6d0d16a2b310fcdf380d44acb51075f287..1b22a8dea399687b6692b94186d377212e544aea 100644
@@ -155,13 +155,7 @@ END_BTB_FLUSH_SECTION
        stw     r10,GPR11(r11);                                              \
        b       2f;                                                          \
        /* COMING FROM PRIV MODE */                                          \
-1:     lwz     r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r11);                     \
-       lwz     r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r11);                  \
-       stw     r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r8);                      \
-       stw     r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r8);                   \
-       lwz     r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r11);                      \
-       stw     r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r8);                       \
-       mr      r11,r8;                                                      \
+1:     mr      r11, r8;                                                             \
 2:     mfspr   r8,SPRN_SPRG_RSCRATCH_##exc_level;                           \
        stw     r12,GPR12(r11);         /* save various registers          */\
        mflr    r10;                                                         \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 6301bb24889a380ccaf130ba40a949be9f02dfe7..11f38adbe0201213f37d2cc914f3e86a012c3bb9 100644
@@ -719,8 +719,7 @@ finish_tlb_load:
 
        /* Get the next_tlbcam_idx percpu var */
 #ifdef CONFIG_SMP
-       lwz     r12, TASK_STACK-THREAD(r12)
-       lwz     r15, TI_CPU(r12)
+       lwz     r15, TI_CPU-THREAD(r12)
        lis     r14, __per_cpu_offset@h
        ori     r14, r14, __per_cpu_offset@l
        rlwinm  r15, r15, 2, 0, 29
@@ -1093,8 +1092,8 @@ __secondary_start:
 
        /* get current's stack and current */
        lis     r1,secondary_ti@ha
-       lwz     r1,secondary_ti@l(r1)
-       lwz     r2,TI_TASK(r1)
+       lwz     r2,secondary_ti@l(r1)
+       lwz     r1,TASK_STACK(r2)
 
        /* stack */
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 531e9ef153c09778122a72d620e4baf8d33b756c..85c48911938a7cba9ad23405948f80fcae943f8a 100644
@@ -673,24 +673,9 @@ void do_IRQ(struct pt_regs *regs)
                set_irq_regs(old_regs);
                return;
        }
-
-       /* Prepare the thread_info in the irq stack */
-       irqtp->task = curtp->task;
-       irqtp->flags = 0;
-
-       /* Copy the preempt_count so that the [soft]irq checks work. */
-       irqtp->preempt_count = curtp->preempt_count;
-
        /* Switch stack and call */
        call_do_irq(regs, irqtp);
 
-       /* Restore stack limit */
-       irqtp->task = NULL;
-
-       /* Copy back updates to the thread_info */
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
-
        set_irq_regs(old_regs);
 }
 
@@ -698,85 +683,23 @@ void __init init_IRQ(void)
 {
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
-
-       exc_lvl_ctx_init();
-
-       irq_ctx_init();
 }
 
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
-
-void exc_lvl_ctx_init(void)
-{
-       struct thread_info *tp;
-       int i, cpu_nr;
-
-       for_each_possible_cpu(i) {
-#ifdef CONFIG_PPC64
-               cpu_nr = i;
-#else
-#ifdef CONFIG_SMP
-               cpu_nr = get_hard_smp_processor_id(i);
-#else
-               cpu_nr = 0;
-#endif
-#endif
-
-               tp = critirq_ctx[cpu_nr];
-               tp->cpu = cpu_nr;
-               tp->preempt_count = 0;
-
-#ifdef CONFIG_BOOKE
-               tp = dbgirq_ctx[cpu_nr];
-               tp->cpu = cpu_nr;
-               tp->preempt_count = 0;
-
-               tp = mcheckirq_ctx[cpu_nr];
-               tp->cpu = cpu_nr;
-               tp->preempt_count = HARDIRQ_OFFSET;
-#endif
-       }
-}
 #endif
 
 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
 
-void irq_ctx_init(void)
-{
-       struct thread_info *tp;
-       int i;
-
-       for_each_possible_cpu(i) {
-               tp = softirq_ctx[i];
-               tp->cpu = i;
-               klp_init_thread_info(tp);
-
-               tp = hardirq_ctx[i];
-               tp->cpu = i;
-               klp_init_thread_info(tp);
-       }
-}
-
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curtp, *irqtp;
+       struct thread_info *irqtp;
 
-       curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
-       irqtp->task = curtp->task;
-       irqtp->flags = 0;
        call_do_softirq(irqtp);
-       irqtp->task = NULL;
-
-       /* Set any flag that may have been set on the
-        * alternate stack
-        */
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
 }
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index e1865565f0aeead6284fa396614faf56143f9f9b..7dd55eb1259dc15f9d1204fb848d9a395f0b3d91 100644
@@ -151,41 +151,13 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
        return 1;
 }
 
-static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
-       struct thread_info *thread_info, *exception_thread_info;
-       struct thread_info *backup_current_thread_info =
-               this_cpu_ptr(&kgdb_thread_info);
-
        if (user_mode(regs))
                return 0;
 
-       /*
-        * On Book E and perhaps other processors, singlestep is handled on
-        * the critical exception stack.  This causes current_thread_info()
-        * to fail, since it it locates the thread_info by masking off
-        * the low bits of the current stack pointer.  We work around
-        * this issue by copying the thread_info from the kernel stack
-        * before calling kgdb_handle_exception, and copying it back
-        * afterwards.  On most processors the copy is avoided since
-        * exception_thread_info == thread_info.
-        */
-       thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
-       exception_thread_info = current_thread_info();
-
-       if (thread_info != exception_thread_info) {
-               /* Save the original current_thread_info. */
-               memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
-               memcpy(exception_thread_info, thread_info, sizeof *thread_info);
-       }
-
        kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-       if (thread_info != exception_thread_info)
-               /* Restore current_thread_info lastly. */
-               memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
-
        return 1;
 }
 
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index a0f6f45005bd42c9dfa94e608bd2dabbab2f99bd..75692c327ba0932bcceb5aa6e1e7912360982355 100644
@@ -317,10 +317,8 @@ void default_machine_kexec(struct kimage *image)
         * We setup preempt_count to avoid using VMX in memcpy.
         * XXX: the task struct will likely be invalid once we do the copy!
         */
-       kexec_stack.thread_info.task = current_thread_info()->task;
-       kexec_stack.thread_info.flags = 0;
-       kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
-       kexec_stack.thread_info.cpu = current_thread_info()->cpu;
+       current_thread_info()->flags = 0;
+       current_thread_info()->preempt_count = HARDIRQ_OFFSET;
 
        /* We need a static PACA, too; copy this CPU's PACA over and switch to
         * it. Also poison per_cpu_offset and NULL lppaca to catch anyone using
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dc2aaaf75c87b328111f84dce7c113d8dadd2e82..fd07711035bdd635f82ff75c6a26525cadf82924 100644
@@ -1634,7 +1634,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        struct thread_info *ti = task_thread_info(p);
 
-       klp_init_thread_info(ti);
+       klp_init_thread_info(p);
 
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9a6a0859c1efcce4ddad2bd095da8fe94477d74b..e7534f306c8e02c389700c7c7c03c27d161cbb82 100644
@@ -937,7 +937,7 @@ void __init setup_arch(char **cmdline_p)
        /* Reserve large chunks of memory for use by CMA for KVM. */
        kvm_cma_reserve();
 
-       klp_init_thread_info(&init_thread_info);
+       klp_init_thread_info(&init_task);
 
        init_mm.start_code = (unsigned long)_stext;
        init_mm.end_code = (unsigned long) _etext;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 080dd515d5873d4cf4a0ea9c627ac30cd52b3d52..0912948a8ea6895658b33af59a51dcddcbc97a18 100644
@@ -689,24 +689,6 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
-/*
- * Emergency stacks are used for a range of things, from asynchronous
- * NMIs (system reset, machine check) to synchronous, process context.
- * We set preempt_count to zero, even though that isn't necessarily correct. To
- * get the right value we'd need to copy it from the previous thread_info, but
- * doing that might fault causing more problems.
- * TODO: what to do with accounting?
- */
-static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
-{
-       ti->task = NULL;
-       ti->cpu = cpu;
-       ti->preempt_count = 0;
-       ti->local_flags = 0;
-       ti->flags = 0;
-       klp_init_thread_info(ti);
-}
-
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -737,18 +719,15 @@ void __init emergency_stack_init(void)
                struct thread_info *ti;
 
                ti = alloc_stack(limit, i);
-               emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
                ti = alloc_stack(limit, i);
-               emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
                /* emergency stack for machine check exception handling. */
                ti = alloc_stack(limit, i);
-               emerg_stack_init_thread_info(ti, i);
                paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
        }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 829ef5411b50b93a998a43db6c3d286310e514fa..96c25a89e877400fd42ec76fe20cd8e5ebc8b3fb 100644
@@ -988,7 +988,7 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
        paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
                                 THREAD_SIZE - STACK_FRAME_OVERHEAD;
 #endif
-       ti->cpu = cpu;
+       idle->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
 }
 
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index 6f4daacad296240c1892e30af7456391541f2659..dc50a8d4b3b972a479aa2b00b1ea1c46db2977e2 100644
@@ -106,9 +106,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
        } while (0)
 #else
 #define PPC_BPF_LOAD_CPU(r)     \
-       do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);          \
-               PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)),                       \
-                               offsetof(struct thread_info, cpu));             \
+       do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4);          \
+               PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu));          \
        } while(0)
 #endif
 #else