static __init int setup_noclflush(char *arg)
{
- setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+ setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
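+ 	/* CLFLUSHOPT is only useful with CLFLUSH, so "noclflush" masks both */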
+ setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
return 1;
}
__setup("noclflush", setup_noclflush);
}
__setup("clearcpuid=", setup_disablecpuid);
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
- DEFINE_PER_CPU(unsigned long, kernel_stack) =
- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
- EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
#include <asm/stacktrace.h>
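+ /*
+  * Return the end of the irq stack that contains @p, or NULL if @p
+  * does not lie on that stack.
+  */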
+ static void *is_irq_stack(void *p, void *irq)
+ {
+ if (p < irq || p >= (irq + THREAD_SIZE))
+ return NULL;
+ return irq + THREAD_SIZE;
+ }
+
+ static void *is_hardirq_stack(unsigned long *stack, int cpu)
+ {
+ void *irq = per_cpu(hardirq_stack, cpu);
+
+ return is_irq_stack(stack, irq);
+ }
+
+ static void *is_softirq_stack(unsigned long *stack, int cpu)
+ {
+ void *irq = per_cpu(softirq_stack, cpu);
+
+ return is_irq_stack(stack, irq);
+ }
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
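+ 	/* Pin this CPU: the per_cpu irq stack lookups below must stay stable */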
+ const unsigned cpu = get_cpu();
int graph = 0;
+ u32 *prev_esp;
if (!task)
task = current;
	if (!stack) {
		unsigned long dummy;

		stack = &dummy;
- if (task && task != current)
+ if (task != current)
stack = (unsigned long *)task->thread.sp;
}
for (;;) {
struct thread_info *context;
+ void *end_stack;
+
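+ 		/* Check whether we are currently on an irq stack */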
+ end_stack = is_hardirq_stack(stack, cpu);
+ if (!end_stack)
+ end_stack = is_softirq_stack(stack, cpu);
- context = (struct thread_info *)
- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+ context = task_thread_info(task);
+ bp = ops->walk_stack(context, stack, bp, ops, data,
+ end_stack, &graph);
- stack = (unsigned long *)context->previous_esp;
+ /* Stop if not on irq stack */
+ if (!end_stack)
+ break;
+
+ /* The previous esp is saved on the bottom of the stack */
+ prev_esp = (u32 *)(end_stack - THREAD_SIZE);
+ stack = (unsigned long *)*prev_esp;
if (!stack)
break;
+
if (ops->stack(data, "IRQ") < 0)
break;
touch_nmi_watchdog();
}
+ put_cpu();
}
EXPORT_SYMBOL(dump_trace);
 * Since CPU0 is not woken up by INIT, it doesn't wait for the IPI.
*/
cpuid = smp_processor_id();
- if (apic->wait_for_init_deassert && cpuid != 0)
- apic->wait_for_init_deassert(&init_deasserted);
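+ 	/* Open-code the wait: spin until the BSP de-asserts INIT */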
+ if (apic->wait_for_init_deassert && cpuid)
+ while (!atomic_read(&init_deasserted))
+ cpu_relax();
/*
* (This works even if the APIC is not enabled.)
int id;
int boot_error;
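+ 	/* The INIT/NMI wakeup sequence below must not be preempted */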
+ preempt_disable();
+
/*
* Wake up AP by INIT, INIT, STARTUP sequence.
*/
- if (cpu)
- return wakeup_secondary_cpu_via_init(apicid, start_ip);
+ if (cpu) {
+ boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
+ goto out;
+ }
/*
* Wake up BSP by nmi.
boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
}
+out:
+ preempt_enable();
+
return boot_error;
}
#else
clear_tsk_thread_flag(idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
+ #endif
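+ 	/* kernel_stack now exists on 32-bit too, so set it unconditionally */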
per_cpu(kernel_stack, cpu) =
(unsigned long)task_stack_page(idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
- #endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
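+ 	/* the wait loop below issues CLFLUSH, so require the feature */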
- if (!this_cpu_has(X86_FEATURE_CLFLSH))
+ if (!this_cpu_has(X86_FEATURE_CLFLUSH))
return;
if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
return;