debug lockups: Improve lockup detection
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d2a372fb0b9b511cfe9ebdc1f5431f2ec5faafcd..9c5fa9fc57ecb5ed0dda298e1581b285d4a87be3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
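
The new <linux/nmi.h> include exists for the trigger_all_cpu_backtrace() calls added to the stall-warning paths below. Unlike dump_stack(), which only dumps the current CPU's stack, trigger_all_cpu_backtrace() asks every CPU for a backtrace (via NMI where the architecture supports it). A rough sketch of the shape of that interface; the exact guard and fallback vary by architecture and kernel version, so treat both as assumptions:

/* Sketch only -- not copied from any particular tree. */
#if defined(CONFIG_X86)				/* assumed guard */
extern void trigger_all_cpu_backtrace(void);	/* NMI-driven all-CPU stack dump */
#else
static inline void trigger_all_cpu_backtrace(void)
{
	/* No NMI backtrace support: the call degrades to a no-op. */
}
#endif
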
@@ -469,6 +470,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
        }
        printk(" (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
+       trigger_all_cpu_backtrace();
+
        force_quiescent_state(rsp, 0);  /* Kick them all. */
 }
 
@@ -479,12 +482,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
        printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
                        smp_processor_id(), jiffies - rsp->gp_start);
-       dump_stack();
+       trigger_all_cpu_backtrace();
+
        spin_lock_irqsave(&rnp->lock, flags);
        if ((long)(jiffies - rsp->jiffies_stall) >= 0)
                rsp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rnp->lock, flags);
+
        set_need_resched();  /* kick ourselves to get things going. */
 }
 
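
Both stall-warning paths compare timestamps with the (long)(a - b) idiom (as in the jiffies_stall recheck above) rather than comparing the unsigned values directly. A standalone userspace sketch, with made-up names, showing why the signed subtraction stays correct when the jiffies counter wraps:

#include <stdio.h>

typedef unsigned long fake_jiffies_t;	/* stand-in for the kernel's jiffies */

/* "a is at or after b", valid across wraparound as long as the two
 * values are less than half the counter range apart. */
static int at_or_after(fake_jiffies_t a, fake_jiffies_t b)
{
	return (long)(a - b) >= 0;
}

int main(void)
{
	fake_jiffies_t before_wrap = (fake_jiffies_t)-10;	/* 10 ticks before wrap */
	fake_jiffies_t after_wrap  = 5;				/* 5 ticks after wrap */

	/* after_wrap is later in time despite being numerically smaller. */
	printf("%d\n", at_or_after(after_wrap, before_wrap));	/* prints 1 */
	printf("%d\n", at_or_after(before_wrap, after_wrap));	/* prints 0 */
	return 0;
}
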
@@ -1259,31 +1264,44 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        check_cpu_stall(rsp, rdp);
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
-       if (rdp->qs_pending)
+       if (rdp->qs_pending) {
+               rdp->n_rp_qs_pending++;
                return 1;
+       }
 
        /* Does this CPU have callbacks ready to invoke? */
-       if (cpu_has_callbacks_ready_to_invoke(rdp))
+       if (cpu_has_callbacks_ready_to_invoke(rdp)) {
+               rdp->n_rp_cb_ready++;
                return 1;
+       }
 
        /* Has RCU gone idle with this CPU needing another grace period? */
-       if (cpu_needs_another_gp(rsp, rdp))
+       if (cpu_needs_another_gp(rsp, rdp)) {
+               rdp->n_rp_cpu_needs_gp++;
                return 1;
+       }
 
        /* Has another RCU grace period completed?  */
-       if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
+       if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */
+               rdp->n_rp_gp_completed++;
                return 1;
+       }
 
        /* Has a new RCU grace period started? */
-       if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
+       if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */
+               rdp->n_rp_gp_started++;
                return 1;
+       }
 
        /* Has an RCU GP gone long enough to send resched IPIs &c? */
        if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-           ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
+           ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
+               rdp->n_rp_need_fqs++;
                return 1;
+       }
 
        /* nothing to do */
+       rdp->n_rp_need_nothing++;
        return 0;
 }
 
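
Each early return in __rcu_pending() now increments a per-CPU counter recording which condition made the CPU report pending RCU work, so a stuck system can show which test keeps firing. The counter fields themselves live in struct rcu_data in kernel/rcutree.h, which is outside this diff; the sketch below shows the shape those declarations would take, with types and placement assumed rather than copied from the header:

/* Sketch of the fields implied by the rdp->n_rp_* increments above. */
struct rcu_data {
	/* ... existing quiescent-state and callback bookkeeping ... */
	unsigned long n_rp_qs_pending;		/* core wants our QS report */
	unsigned long n_rp_cb_ready;		/* callbacks ready to invoke */
	unsigned long n_rp_cpu_needs_gp;	/* need another grace period */
	unsigned long n_rp_gp_completed;	/* a grace period completed */
	unsigned long n_rp_gp_started;		/* a new grace period started */
	unsigned long n_rp_need_fqs;		/* time to force quiescent states */
	unsigned long n_rp_need_nothing;	/* nothing to do */
	/* ... remaining fields ... */
};
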
@@ -1520,7 +1538,7 @@ void __init __rcu_init(void)
        int j;
        struct rcu_node *rnp;
 
-       printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+       printk(KERN_INFO "Hierarchical RCU implementation.\n");
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
        printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -1533,7 +1551,6 @@ void __init __rcu_init(void)
                rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
-       printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
 }
 
 module_param(blimit, int, 0);
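
With the counters in place, a debugging consumer can report why __rcu_pending() has been returning 1 on a given CPU. The helper below is purely illustrative and not part of this patch (the in-tree consumer is the RCU tracing code, which this diff does not touch); its name and the printk-based output are made up for the example:

/* Hypothetical helper: dump the per-CPU breakdown of __rcu_pending() verdicts. */
static void print_rcu_pending_counts(struct rcu_data *rdp)
{
	printk(KERN_INFO
	       "rcu_pending cpu %d: qsp=%lu cbr=%lu cng=%lu gpc=%lu gps=%lu nf=%lu nn=%lu\n",
	       rdp->cpu,
	       rdp->n_rp_qs_pending, rdp->n_rp_cb_ready,
	       rdp->n_rp_cpu_needs_gp, rdp->n_rp_gp_completed,
	       rdp->n_rp_gp_started, rdp->n_rp_need_fqs,
	       rdp->n_rp_need_nothing);
}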