rcu: Remove single-rcu_node optimization in rcu_start_gp()
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 11 Jan 2012 19:34:50 +0000 (11:34 -0800)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 21 Feb 2012 17:03:38 +0000 (09:03 -0800)
The grace-period initialization sequence in rcu_start_gp() has a special
case for systems where the rcu_node tree is a single rcu_node structure.
This made sense some years ago when systems were smaller and up to 64
CPUs could share a single rcu_node structure, but now that large systems
are common and a given leaf rcu_node structure can support only 16 CPUs
(due to lock contention on the rcu_node's ->lock field), this optimization
is almost never taken.  And even the small mobile platforms that could
still take this path would likely prefer the resulting reduction in
kernel text size.

Therefore, this commit removes the check for single-rcu_node trees.
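
With the special case gone, single-rcu_node configurations are initialized
by the same breadth-first loop that handles multi-level trees later in
rcu_start_gp().  As a rough sketch of that general path (paraphrased, not
part of this patch, so details may differ from the exact source at this
commit):

	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;	/* CPUs that must report a QS. */
		rnp->gpnum = rsp->gpnum;	/* Propagate the new GP number. */
		rnp->completed = rsp->completed;
		if (rnp == rdp->mynode)		/* This CPU's leaf: per-CPU setup. */
			rcu_start_gp_per_cpu(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	}

On a NUM_RCU_NODES == 1 system this loop executes exactly once, over the
root rcu_node structure, which is precisely what the removed special case
open-coded.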

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
kernel/rcutree.c

index 7789e666394dce8bd453c2f825a56e76f30f4739..49bb363a083748b572d9c68e5c3baac5435e90b6 100644 (file)
@@ -984,26 +984,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
        rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
        rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
        record_gp_stall_check_time(rsp);
-
-       /* Special-case the common single-level case. */
-       if (NUM_RCU_NODES == 1) {
-               rcu_preempt_check_blocked_tasks(rnp);
-               rnp->qsmask = rnp->qsmaskinit;
-               rnp->gpnum = rsp->gpnum;
-               rnp->completed = rsp->completed;
-               rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
-               rcu_start_gp_per_cpu(rsp, rnp, rdp);
-               rcu_preempt_boost_start_gp(rnp);
-               trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
-                                           rnp->level, rnp->grplo,
-                                           rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               return;
-       }
-
        raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */
 
-
        /* Exclude any concurrent CPU-hotplug operations. */
        raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */