Merge branch 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 1 Apr 2014 17:17:18 +0000 (10:17 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 1 Apr 2014 17:17:18 +0000 (10:17 -0700)
Pull x86 threadinfo changes from Ingo Molnar:
 "The main change here is the consolidation/unification of 32 and 64 bit
  thread_info handling methods, from Steven Rostedt"

* 'x86-threadinfo-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, threadinfo: Redo "x86: Use inline assembler to get sp"
  x86: Clean up dumpstack_64.c code
  x86: Keep thread_info on thread stack in x86_32
  x86: Prepare removal of previous_esp from i386 thread_info structure
  x86: Nuke GET_THREAD_INFO_WITH_ESP() macro for i386
  x86: Nuke the supervisor_stack field in i386 thread_info
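
Background for the shortlog above: before this series, x86_32 found
thread_info by masking the stack pointer with ~(THREAD_SIZE - 1), while
x86_64 derived it from the per-cpu kernel_stack variable. The unification
makes both use kernel_stack. A minimal sketch of the unified lookup,
assuming the KERNEL_STACK_OFFSET and THREAD_SIZE definitions in this tree
(not a verbatim quote of asm/thread_info.h):

	static inline struct thread_info *current_thread_info(void)
	{
		/*
		 * kernel_stack points KERNEL_STACK_OFFSET bytes below the
		 * top of the current task's stack; thread_info sits at the
		 * bottom of the THREAD_SIZE-sized stack area, so undo the
		 * offset and step back one full stack.
		 */
		return (struct thread_info *)
			(this_cpu_read_stable(kernel_stack) +
			 KERNEL_STACK_OFFSET - THREAD_SIZE);
	}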

arch/x86/kernel/cpu/common.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/smpboot.c

diff --combined arch/x86/kernel/cpu/common.c
index cca53d88762a88aa994a178ef47f060f489def68,29c1944a98ac0127c230da1d911f45e07e57b41b..a135239badb7fd4762ebf939ae755183660641b2
@@@ -1025,8 -1025,7 +1025,8 @@@ __setup("show_msr=", setup_show_msr)
  
  static __init int setup_noclflush(char *arg)
  {
 -      setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
 +      setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
 +      setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
        return 1;
  }
  __setup("noclflush", setup_noclflush);
@@@ -1079,6 -1078,10 +1079,10 @@@ static __init int setup_disablecpuid(ch
  }
  __setup("clearcpuid=", setup_disablecpuid);
  
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
  #ifdef CONFIG_X86_64
  struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
  struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@@ -1095,10 -1098,6 +1099,6 @@@ DEFINE_PER_CPU(struct task_struct *, cu
        &init_task;
  EXPORT_PER_CPU_SYMBOL(current_task);
  
- DEFINE_PER_CPU(unsigned long, kernel_stack) =
-       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
- EXPORT_PER_CPU_SYMBOL(kernel_stack);
  DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
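
The hunks above hoist kernel_stack out of the CONFIG_X86_64-only section
so that x86_32 can use the same thread_info lookup. As a sanity check of
the boot-CPU initializer, the same arithmetic in plain C (a sketch, not
code from the tree):

	unsigned long ks = (unsigned long)&init_thread_union
			   - KERNEL_STACK_OFFSET + THREAD_SIZE;

	/* Undoing the bias recovers the base of the initial stack area,
	 * which is where the boot task's thread_info lives. */
	struct thread_info *ti = (struct thread_info *)
		(ks + KERNEL_STACK_OFFSET - THREAD_SIZE);
	/* ti == &init_thread_union.thread_info */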
  
diff --combined arch/x86/kernel/dumpstack_32.c
index a21d49c071db0198a767a9e41d8f97a4621ede88,dca820b627d673797f89e891bc42a72352893884..5abd4cd4230c69f3ff4730e97a1297be40013c44
  
  #include <asm/stacktrace.h>
  
+ static void *is_irq_stack(void *p, void *irq)
+ {
+       if (p < irq || p >= (irq + THREAD_SIZE))
+               return NULL;
+       return irq + THREAD_SIZE;
+ }
+
+ static void *is_hardirq_stack(unsigned long *stack, int cpu)
+ {
+       void *irq = per_cpu(hardirq_stack, cpu);
+       return is_irq_stack(stack, irq);
+ }
+
+ static void *is_softirq_stack(unsigned long *stack, int cpu)
+ {
+       void *irq = per_cpu(softirq_stack, cpu);
+       return is_irq_stack(stack, irq);
+ }
  
  void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
  {
+       const unsigned cpu = get_cpu();
        int graph = 0;
+       u32 *prev_esp;
  
        if (!task)
                task = current;
@@@ -30,7 -53,7 +53,7 @@@
                unsigned long dummy;
  
                stack = &dummy;
 -              if (task && task != current)
 +              if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }
  
  
        for (;;) {
                struct thread_info *context;
+               void *end_stack;
+               end_stack = is_hardirq_stack(stack, cpu);
+               if (!end_stack)
+                       end_stack = is_softirq_stack(stack, cpu);
  
-               context = (struct thread_info *)
-                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-               bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+               context = task_thread_info(task);
+               bp = ops->walk_stack(context, stack, bp, ops, data,
+                                    end_stack, &graph);
  
-               stack = (unsigned long *)context->previous_esp;
+               /* Stop if not on irq stack */
+               if (!end_stack)
+                       break;
+               /* The previous esp is saved on the bottom of the stack */
+               prev_esp = (u32 *)(end_stack - THREAD_SIZE);
+               stack = (unsigned long *)*prev_esp;
                if (!stack)
                        break;
                if (ops->stack(data, "IRQ") < 0)
                        break;
                touch_nmi_watchdog();
        }
+       put_cpu();
  }
  EXPORT_SYMBOL(dump_trace);
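
The rewritten loop depends on a convention from this series: code that
switches to an irq stack saves the interrupted stack pointer in the first
word of the THREAD_SIZE-aligned irq stack page, which is exactly what the
end_stack - THREAD_SIZE read above picks up. A sketch of the saving side,
reusing the hardirq_stack per-cpu pointer from the helpers above; this is
not the actual irq_32.c entry code:

	static void save_prev_esp_sketch(void)	/* hypothetical helper */
	{
		void *irqstk = __this_cpu_read(hardirq_stack);
		u32 *prev_esp;

		/* The first word of the irq stack page holds the previous
		 * stack pointer, for dump_trace() to walk back through. */
		prev_esp = (u32 *)irqstk;
		*prev_esp = current_stack_pointer;

		/* ...then %esp is pointed at irqstk + THREAD_SIZE and the
		 * handler runs on the new stack... */
	}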
  
diff --combined arch/x86/kernel/smpboot.c
index 5aad5a370c85886c662cb6ba5580f1ce56368dc5,867d53ea88a3ddc8e0d6e2e51d680c4aefac457c..34826934d4a7b37da39ae7556d04be37cae55ee8
@@@ -122,9 -122,8 +122,9 @@@ static void smp_callin(void
         * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
         */
        cpuid = smp_processor_id();
 -      if (apic->wait_for_init_deassert && cpuid != 0)
 -              apic->wait_for_init_deassert(&init_deasserted);
 +      if (apic->wait_for_init_deassert && cpuid)
 +              while (!atomic_read(&init_deasserted))
 +                      cpu_relax();
  
        /*
         * (This works even if the APIC is not enabled.)
@@@ -702,15 -701,11 +702,15 @@@ wakeup_cpu_via_init_nmi(int cpu, unsign
        int id;
        int boot_error;
  
 +      preempt_disable();
 +
        /*
         * Wake up AP by INIT, INIT, STARTUP sequence.
         */
 -      if (cpu)
 -              return wakeup_secondary_cpu_via_init(apicid, start_ip);
 +      if (cpu) {
 +              boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 +              goto out;
 +      }
  
        /*
         * Wake up BSP by nmi.
                boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
        }
  
 +out:
 +      preempt_enable();
 +
        return boot_error;
  }
  
@@@ -766,10 -758,10 +766,10 @@@ static int do_boot_cpu(int apicid, int 
  #else
        clear_tsk_thread_flag(idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
+ #endif
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
- #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
        stack_start  = idle->thread.sp;
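
With the #endif moved above it, the per_cpu(kernel_stack, cpu) assignment
now runs for secondary CPUs on 32-bit as well as 64-bit. Checking it
against the lookup sketched earlier:

	/*
	 * kernel_stack = task_stack_page(idle)
	 *                - KERNEL_STACK_OFFSET + THREAD_SIZE
	 *
	 * kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE
	 *     == task_stack_page(idle)
	 *     == task_thread_info(idle)
	 *
	 * so the new CPU's thread_info lookup lands on its idle task.
	 */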
@@@ -1387,7 -1379,7 +1387,7 @@@ static inline void mwait_play_dead(void
  
        if (!this_cpu_has(X86_FEATURE_MWAIT))
                return;
 -      if (!this_cpu_has(X86_FEATURE_CLFLSH))
 +      if (!this_cpu_has(X86_FEATURE_CLFLUSH))
                return;
        if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
                return;