Merge branch 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Dec 2009 23:33:27 +0000 (15:33 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Dec 2009 23:33:27 +0000 (15:33 -0800)
* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Limit number of per cpu TSC sync messages
  x86: dumpstack, 64-bit: Disable preemption when walking the IRQ/exception stacks
  x86: dumpstack: Clean up the x86_stack_ids[][] initialization and other details
  x86, cpu: mv display_cacheinfo -> cpu_detect_cache_sizes
  x86: Suppress stack overrun message for init_task
  x86: Fix cpu_devs[] initialization in early_cpu_init()
  x86: Remove CPU cache size output for non-Intel too
  x86: Minimise printk spew from per-vendor init code
  x86: Remove the CPU cache size printk's
  cpumask: Avoid cpumask_t in arch/x86/kernel/apic/nmi.c
  x86: Make sure we also print a Code: line for show_regs()

14 files changed:
arch/x86/kernel/apic/nmi.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/tsc_sync.c
arch/x86/mm/fault.c

index 7ff61d6a188ab2d1779270a6ff937c0a1a40b82a..6389432a9dbf7f07a0dd08b4e67857c6ec899d6d 100644 (file)
@@ -39,7 +39,8 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_t backtrace_mask __read_mostly;
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -414,7 +415,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
        }
 
        /* We can be called before check_nmi_watchdog, hence NULL check. */
-       if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+       if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                spin_lock(&lock);
@@ -422,7 +423,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                show_regs(regs);
                dump_stack();
                spin_unlock(&lock);
-               cpumask_clear_cpu(cpu, &backtrace_mask);
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
                rc = 1;
        }
@@ -558,14 +559,14 @@ void arch_trigger_all_cpu_backtrace(void)
 {
        int i;
 
-       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
 
        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
-               if (cpumask_empty(&backtrace_mask))
+               if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }
index c910a716a71ce103b878154b24b2813917624716..7128b3799cecdd8c2f708124e1939c0686224511 100644 (file)
@@ -535,7 +535,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                }
        }
 
-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 
        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008) {
index c95e831bb0954d8c5c50520c27aa4c0e4819f548..e58d978e075824afd7ade02bea73775554a7e0c1 100644 (file)
@@ -294,7 +294,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }
 
-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 }
 
 enum {
index 9053be5d95cd4fb21f4aae94f2e7335af6a6f83a..a4ec8b64754405d636a16f6405156ff71553150e 100644 (file)
@@ -61,7 +61,7 @@ void __init setup_cpu_local_masks(void)
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 #else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
@@ -383,7 +383,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
        }
 }
 
-void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
        unsigned int n, dummy, ebx, ecx, edx, l2size;
 
@@ -391,8 +391,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 
        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-                               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
 #ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
@@ -422,9 +420,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 
        c->x86_cache_size = l2size;
-
-       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-                       l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
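
The cpu_detect_cache_sizes() rename and printk removal above leave the detection logic itself untouched: it still reads the extended CPUID leaves, it just no longer reports them at boot. As a rough userspace illustration (not part of this patch) of what those leaves contain, assuming GCC's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

/*
 * Decode the AMD-style cache leaves read by cpu_detect_cache_sizes():
 * 0x80000005 describes the L1 caches, 0x80000006 the L2 cache.
 * __get_cpuid() returns 0 when a leaf is not supported.
 */
int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
                /* ECX[31:24] = L1D size in KB, EDX[31:24] = L1I size in KB */
                printf("L1: %uK D + %uK I\n", ecx >> 24, edx >> 24);

        if (__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
                /* ECX[31:16] = L2 size in KB, ECX[7:0] = line size in bytes */
                printf("L2: %uK (%u bytes/line)\n", ecx >> 16, ecx & 0xff);

        return 0;
}

The sizes remain visible in /proc/cpuinfo and the per-CPU cacheinfo sysfs directories; only the boot-time console output goes away.
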
@@ -659,24 +654,31 @@ void __init early_cpu_init(void)
        const struct cpu_dev *const *cdev;
        int count = 0;
 
+#ifdef PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
+#endif
+
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;
-               unsigned int j;
 
                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;
 
-               for (j = 0; j < 2; j++) {
-                       if (!cpudev->c_ident[j])
-                               continue;
-                       printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
-                               cpudev->c_ident[j]);
+#ifdef PROCESSOR_SELECT
+               {
+                       unsigned int j;
+
+                       for (j = 0; j < 2; j++) {
+                               if (!cpudev->c_ident[j])
+                                       continue;
+                               printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
+                                       cpudev->c_ident[j]);
+                       }
                }
+#endif
        }
-
        early_identify_cpu(&boot_cpu_data);
 }
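
The early_cpu_init() change above moves the "KERNEL supported cpus:" listing under #ifdef PROCESSOR_SELECT, so a default build stays quiet. The same quiet-by-default idea in a tiny sketch, with a made-up CONFIG_VERBOSE_BOOT symbol standing in for PROCESSOR_SELECT:

#include <linux/kernel.h>

/*
 * Hypothetical helper: informational boot chatter compiles away unless the
 * (made-up) CONFIG_VERBOSE_BOOT option is enabled.
 */
#ifdef CONFIG_VERBOSE_BOOT
#define boot_info(fmt, ...)     printk(KERN_INFO fmt, ##__VA_ARGS__)
#else
#define boot_info(fmt, ...)     do { } while (0)
#endif
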
 
index 6de9a908e4008bf6d99e56cb8a9ef909b1f3949c..3624e8a0f71bf72e4c3cd86abcedfbb1c53d9b4d 100644 (file)
@@ -32,6 +32,6 @@ struct cpu_dev {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];
 
-extern void display_cacheinfo(struct cpuinfo_x86 *c);
+extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 
 #endif
index 19807b89f058c3289dc0c5dfdb1d51a518f0a151..4fbd384fb645f19661ce7b98276c492bf9259297 100644 (file)
@@ -373,7 +373,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
        /* Handle the GX (Formally known as the GX2) */
 
        if (c->x86 == 5 && c->x86_model == 5)
-               display_cacheinfo(c);
+               cpu_detect_cache_sizes(c);
        else
                init_cyrix(c);
 }
index 804c40e2bc3e1fd588f08f50db90dc83e02ae1b4..0df4c2b7107f936d2ebe11190b3a6aac04feaccc 100644 (file)
@@ -488,22 +488,6 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }
 
-       if (trace)
-               printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
-       else if (l1i)
-               printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
-       if (l1d)
-               printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
-       else
-               printk(KERN_CONT "\n");
-
-       if (l2)
-               printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-
-       if (l3)
-               printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
        return l2;
index bb62b3e5caadca0faba5e489fb874bf563e0073c..28000743bbb06984b0b7182caca09833fed756b9 100644 (file)
@@ -26,7 +26,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 
        early_init_transmeta(c);
 
-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 
        /* Print CMS and CPU revision */
        max = cpuid_eax(0x80860000);
index f7dd2a7c3bf42b51bb9368a926885c8d55a4153a..e0ed4c7abb626e13f8f87d735b40576f6b44bb8e 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/kexec.h>
+#include <linux/sysfs.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
-#include <linux/sysfs.h>
 
 #include <asm/stacktrace.h>
 
@@ -35,6 +35,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
        if (!stack) {
                unsigned long dummy;
+
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
@@ -57,8 +58,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-               bp = print_context_stack(context, stack, bp, ops,
-                                        data, NULL, &graph);
+               bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);
 
                stack = (unsigned long *)context->previous_esp;
                if (!stack)
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(dump_trace);
 
 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *sp, unsigned long bp, char *log_lvl)
+                  unsigned long *sp, unsigned long bp, char *log_lvl)
 {
        unsigned long *stack;
        int i;
@@ -156,4 +156,3 @@ int is_valid_bugaddr(unsigned long ip)
 
        return ud2 == 0x0b0f;
 }
-
index a071e6be177e7d94f0b77426006e2f5127761548..8e740934bd1f56307b1cc8b7c318bdaeeda5b9d5 100644 (file)
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/kexec.h>
+#include <linux/sysfs.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
-#include <linux/sysfs.h>
 
 #include <asm/stacktrace.h>
 
 #include "dumpstack.h"
 
+#define N_EXCEPTION_STACKS_END \
+               (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
 
 static char x86_stack_ids[][8] = {
-               [DEBUG_STACK - 1] = "#DB",
-               [NMI_STACK - 1] = "NMI",
-               [DOUBLEFAULT_STACK - 1] = "#DF",
-               [STACKFAULT_STACK - 1] = "#SS",
-               [MCE_STACK - 1] = "#MC",
+               [ DEBUG_STACK-1                 ]       = "#DB",
+               [ NMI_STACK-1                   ]       = "NMI",
+               [ DOUBLEFAULT_STACK-1           ]       = "#DF",
+               [ STACKFAULT_STACK-1            ]       = "#SS",
+               [ MCE_STACK-1                   ]       = "#MC",
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
-               [N_EXCEPTION_STACKS ...
-                       N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
+               [ N_EXCEPTION_STACKS ...
+                 N_EXCEPTION_STACKS_END        ]       = "#DB[?]"
 #endif
-       };
+};
 
 int x86_is_stack_id(int id, char *name)
 {
@@ -37,7 +39,7 @@ int x86_is_stack_id(int id, char *name)
 }
 
 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-                                       unsigned *usedp, char **idp)
+                                        unsigned *usedp, char **idp)
 {
        unsigned k;
 
@@ -202,21 +204,24 @@ EXPORT_SYMBOL(dump_trace);
 
 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *sp, unsigned long bp, char *log_lvl)
+                  unsigned long *sp, unsigned long bp, char *log_lvl)
 {
+       unsigned long *irq_stack_end;
+       unsigned long *irq_stack;
        unsigned long *stack;
+       int cpu;
        int i;
-       const int cpu = smp_processor_id();
-       unsigned long *irq_stack_end =
-               (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-       unsigned long *irq_stack =
-               (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
+
+       preempt_disable();
+       cpu = smp_processor_id();
+
+       irq_stack_end   = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+       irq_stack       = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
 
        /*
-        * debugging aid: "show_stack(NULL, NULL);" prints the
-        * back trace for this cpu.
+        * Debugging aid: "show_stack(NULL, NULL);" prints the
+        * back trace for this cpu:
         */
-
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
@@ -240,6 +245,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
+       preempt_enable();
+
        printk("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
@@ -303,4 +310,3 @@ int is_valid_bugaddr(unsigned long ip)
 
        return ud2 == 0x0b0f;
 }
-
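
On the preemption fix above: smp_processor_id() and the per_cpu() lookups are only stable while the task cannot migrate, so the stack walk is now bracketed by preempt_disable()/preempt_enable(). A minimal sketch of that pattern with a hypothetical per-CPU variable:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_irq_stack_top);      /* hypothetical */

static unsigned long read_this_cpu_stack_top(void)
{
        unsigned long top;

        preempt_disable();      /* pins us to one CPU; smp_processor_id() is valid */
        top = per_cpu(demo_irq_stack_top, smp_processor_id());
        preempt_enable();

        return top;
}

get_cpu()/put_cpu() would express the same pairing in one call each.
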
index 540140284f60ec62fc1a623ec2f3ee47249a1748..075580b3568283609a3a1034aefe58994bd0782f 100644 (file)
@@ -188,7 +188,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 void show_regs(struct pt_regs *regs)
 {
-       __show_regs(regs, 1);
+       show_registers(regs);
        show_trace(NULL, regs, &regs->sp, regs->bp);
 }
 
index 70cf15873f3d65da38e42fbbb670555d2fc22722..a98fe88fab64df0a9b4e4e497939245e7a338fc6 100644 (file)
@@ -227,8 +227,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 void show_regs(struct pt_regs *regs)
 {
-       printk(KERN_INFO "CPU %d:", smp_processor_id());
-       __show_regs(regs, 1);
+       show_registers(regs);
        show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
 
index f37930954d1596c8c366cfa87a5ddb629bb13513..eed156851f5d6f85df71907cfeb5e73b62730d40 100644 (file)
@@ -114,13 +114,12 @@ void __cpuinit check_tsc_sync_source(int cpu)
                return;
 
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-               printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
+               if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
+                       pr_info(
+                       "Skipped synchronization checks as TSC is reliable.\n");
                return;
        }
 
-       pr_info("checking TSC synchronization [CPU#%d -> CPU#%d]:",
-               smp_processor_id(), cpu);
-
        /*
         * Reset it - in case this is a second bootup:
         */
@@ -142,12 +141,14 @@ void __cpuinit check_tsc_sync_source(int cpu)
                cpu_relax();
 
        if (nr_warps) {
-               printk("\n");
+               pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+                       smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, "
                           "turning off TSC clock.\n", max_warp);
                mark_tsc_unstable("check_tsc_sync_source failed");
        } else {
-               printk(" passed.\n");
+               pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+                       smp_processor_id(), cpu);
        }
 
        /*
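
Also worth noting in the tsc_sync.c hunks: the per-CPU "passed" message is demoted to pr_debug(), which compiles to nothing unless DEBUG is defined for the file (or dynamic debug is enabled), while a detected warp is reported with pr_warning(). A small sketch of that level split, using a made-up helper:

#define DEBUG   /* must precede the printk helpers to enable pr_debug() */
#include <linux/kernel.h>

/*
 * Hypothetical reporting helper with the same policy as the hunks above:
 * quiet on success, loud on failure.
 */
static void report_tsc_sync(int src, int dst, bool passed)
{
        if (passed)
                pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
                         src, dst);
        else
                pr_warning("TSC synchronization [CPU#%d -> CPU#%d]: FAILED\n",
                           src, dst);
}
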
index 8f4e2ac93928edd82f4b34ac3bdead37eea289d4..f62777940dfbcc1ef534f9a90dbd96873ba453e1 100644 (file)
@@ -659,7 +659,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
        show_fault_oops(regs, error_code, address);
 
        stackend = end_of_stack(tsk);
-       if (*stackend != STACK_END_MAGIC)
+       if (tsk != &init_task && *stackend != STACK_END_MAGIC)
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
 
        tsk->thread.cr2         = address;