rcu/nocb: Rename __wake_nocb_leader() to __wake_nocb_gp()

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index acb225023ed1939a23ac143f0d713405764075ac..7c7870da234a1bb8e041bf813ac51781b18cf5ae 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -288,7 +288,6 @@ void rcu_note_context_switch(bool preempt)
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp;
 
-       barrier(); /* Avoid RCU read-side critical sections leaking down. */
        trace_rcu_utilization(TPS("Start context switch"));
        lockdep_assert_irqs_disabled();
        WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
@@ -314,15 +313,6 @@ void rcu_note_context_switch(bool preempt)
                                       ? rnp->gp_seq
                                       : rcu_seq_snap(&rnp->gp_seq));
                rcu_preempt_ctxt_queue(rnp, rdp);
-       } else if (t->rcu_read_lock_nesting < 0 &&
-                  t->rcu_read_unlock_special.s) {
-
-               /*
-                * Complete exit from RCU read-side critical section on
-                * behalf of preempted instance of __rcu_read_unlock().
-                */
-               rcu_read_unlock_special(t);
-               rcu_preempt_deferred_qs(t);
        } else {
                rcu_preempt_deferred_qs(t);
        }
@@ -340,7 +330,6 @@ void rcu_note_context_switch(bool preempt)
        if (rdp->exp_deferred_qs)
                rcu_report_exp_rdp(rdp);
        trace_rcu_utilization(TPS("End context switch"));
-       barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -626,22 +615,18 @@ static void rcu_read_unlock_special(struct task_struct *t)
                      (rdp->grpmask & rnp->expmask) ||
                      tick_nohz_full_cpu(rdp->cpu);
                // Need to defer quiescent state until everything is enabled.
-               if ((exp || in_irq()) && irqs_were_disabled && use_softirq &&
-                   (in_irq() || !t->rcu_read_unlock_special.b.deferred_qs)) {
+               if (irqs_were_disabled && use_softirq &&
+                   (in_interrupt() ||
+                    (exp && !t->rcu_read_unlock_special.b.deferred_qs))) {
                        // Using softirq, safe to awaken, and we get
                        // no help from enabling irqs, unlike bh/preempt.
                        raise_softirq_irqoff(RCU_SOFTIRQ);
-               } else if (exp && irqs_were_disabled && !use_softirq &&
-                          !t->rcu_read_unlock_special.b.deferred_qs) {
-                       // Safe to awaken and we get no help from enabling
-                       // irqs, unlike bh/preempt.
-                       invoke_rcu_core();
                } else {
                        // Enabling BH or preempt does reschedule, so...
                        // Also if no expediting or NO_HZ_FULL, slow is OK.
                        set_tsk_need_resched(current);
                        set_preempt_need_resched();
-                       if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+                       if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
                            !rdp->defer_qs_iw_pending && exp) {
                                // Get scheduler to re-evaluate and call hooks.
                                // If !IRQ_WORK, FQS scan will eventually IPI.
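
As a reading aid for the restructured condition above (in_irq() widened to in_interrupt(), the invoke_rcu_core() fallback dropped, and irqs_were_disabled added to the irq_work test), here is a minimal userspace model of the resulting decision; the enum values and the standalone helper are invented for illustration only and are not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* Names below are invented for this sketch; only the logic mirrors the patch. */
enum unlock_action { WAKE_SOFTIRQ, SET_RESCHED, SET_RESCHED_AND_IRQ_WORK };

static enum unlock_action
unlock_special_action(bool irqs_were_disabled, bool use_softirq,
                      bool in_interrupt, bool exp, bool deferred_qs,
                      bool irq_work_enabled, bool defer_qs_iw_pending)
{
        if (irqs_were_disabled && use_softirq &&
            (in_interrupt || (exp && !deferred_qs)))
                return WAKE_SOFTIRQ;                    /* raise_softirq_irqoff() */
        if (irq_work_enabled && irqs_were_disabled &&
            !defer_qs_iw_pending && exp)
                return SET_RESCHED_AND_IRQ_WORK;        /* resched plus irq_work */
        return SET_RESCHED;                             /* resched only */
}

int main(void)
{
        /* Expedited GP, irqs were disabled, softirq usable, task context: */
        printf("%d\n", unlock_special_action(true, true, false, true, false,
                                             true, false));
        return 0;
}
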
@@ -828,11 +813,6 @@ static void rcu_qs(void)
  * dyntick-idle quiescent state visible to other CPUs, which will in
  * some cases serve for expedited as well as normal grace periods.
  * Either way, register a lightweight quiescent state.
- *
- * The barrier() calls are redundant in the common case when this is
- * called externally, but just in case this is called from within this
- * file.
- *
  */
 void rcu_all_qs(void)
 {
@@ -847,14 +827,12 @@ void rcu_all_qs(void)
                return;
        }
        this_cpu_write(rcu_data.rcu_urgent_qs, false);
-       barrier(); /* Avoid RCU read-side critical sections leaking down. */
        if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
                local_irq_save(flags);
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
        }
        rcu_qs();
-       barrier(); /* Avoid RCU read-side critical sections leaking up. */
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(rcu_all_qs);
@@ -864,7 +842,6 @@ EXPORT_SYMBOL_GPL(rcu_all_qs);
  */
 void rcu_note_context_switch(bool preempt)
 {
-       barrier(); /* Avoid RCU read-side critical sections leaking down. */
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_qs();
        /* Load rcu_urgent_qs before other flags. */
@@ -877,7 +854,6 @@ void rcu_note_context_switch(bool preempt)
                rcu_tasks_qs(current);
 out:
        trace_rcu_utilization(TPS("End context switch"));
-       barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -1134,7 +1110,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
+static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 {
        int rnp_index = rnp - rcu_get_root();
        unsigned long flags;
@@ -1142,25 +1118,27 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        struct task_struct *t;
 
        if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
-               return 0;
+               return;
 
        if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
-               return 0;
+               return;
 
        rcu_state.boost = 1;
+
        if (rnp->boost_kthread_task != NULL)
-               return 0;
+               return;
+
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
-       if (IS_ERR(t))
-               return PTR_ERR(t);
+       if (WARN_ON_ONCE(IS_ERR(t)))
+               return;
+
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-       return 0;
 }
 
 /*
@@ -1201,7 +1179,7 @@ static void __init rcu_spawn_boost_kthreads(void)
        struct rcu_node *rnp;
 
        rcu_for_each_leaf_node(rnp)
-               (void)rcu_spawn_one_boost_kthread(rnp);
+               rcu_spawn_one_boost_kthread(rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1211,7 +1189,7 @@ static void rcu_prepare_kthreads(int cpu)
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
        if (rcu_scheduler_fully_active)
-               (void)rcu_spawn_one_boost_kthread(rnp);
+               rcu_spawn_one_boost_kthread(rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
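
A note on the rcu_spawn_one_boost_kthread() change above: both callers discarded the return value (see the removed "(void)" casts), so the function now reports a failed kthread_create() with WARN_ON_ONCE() and returns nothing. A rough userspace analogue of that spawn-and-warn pattern, with pthread_create() standing in for kthread_create() and a hypothetical warn_once() macro standing in for WARN_ON_ONCE():

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's WARN_ON_ONCE(). */
#define warn_once(cond, msg) do {                                       \
        static int warned;                                              \
        if ((cond) && !warned++)                                        \
                fprintf(stderr, "warning: %s\n", msg);                  \
} while (0)

static pthread_t boost_thread;
static int boost_thread_started;

static void *boost_worker(void *arg)
{
        return arg;     /* placeholder for rcu_boost_kthread() */
}

/*
 * Returns nothing: a failed spawn is warned about once and otherwise
 * ignored, mirroring the void rcu_spawn_one_boost_kthread() above.
 */
static void spawn_one_boost_thread(void)
{
        int ret = pthread_create(&boost_thread, NULL, boost_worker, NULL);

        warn_once(ret != 0, "could not start boost worker");
        boost_thread_started = (ret == 0);
}

int main(void)
{
        spawn_one_boost_thread();
        if (boost_thread_started)
                pthread_join(boost_thread, NULL);
        return 0;
}
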
@@ -1469,10 +1447,10 @@ static void rcu_cleanup_after_idle(void)
  * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
  * created that pull the callbacks from the corresponding CPU, wait for
  * a grace period to elapse, and invoke the callbacks.  These kthreads
- * are organized into leaders, which manage incoming callbacks, wait for
- * grace periods, and awaken followers, and the followers, which only
- * invoke callbacks.  Each leader is its own follower.  The no-CBs CPUs
- * do a wake_up() on their kthread when they insert a callback into any
+ * are organized into GP kthreads, which manage incoming callbacks, wait for
+ * grace periods, and awaken CB kthreads, and the CB kthreads, which only
+ * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
+ * do a wake_up() on their GP kthread when they insert a callback into any
  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
  * in which case each kthread actively polls its CPU.  (Which isn't so great
  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
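
To keep the renamed linkage straight while reading the rest of this patch, here is a minimal userspace sketch of how the per-CPU rcu_data structures point at each other after the rename; struct nocb_cpu is invented for illustration and carries only the two pointers involved (->nocb_gp_rdp, formerly ->nocb_leader, and ->nocb_next_cb_rdp, formerly ->nocb_next_follower):

#include <stdio.h>

/*
 * Simplified userspace model of the renamed linkage only; the real
 * struct rcu_data has many more members.
 */
struct nocb_cpu {
        int cpu;
        struct nocb_cpu *nocb_gp_rdp;           /* was ->nocb_leader */
        struct nocb_cpu *nocb_next_cb_rdp;      /* was ->nocb_next_follower */
};

int main(void)
{
        struct nocb_cpu cpus[3] = { { .cpu = 0 }, { .cpu = 1 }, { .cpu = 2 } };
        int i;

        /* CPU 0's rcuog kthread handles grace periods for the whole group. */
        for (i = 0; i < 3; i++)
                cpus[i].nocb_gp_rdp = &cpus[0];
        cpus[0].nocb_next_cb_rdp = &cpus[1];
        cpus[1].nocb_next_cb_rdp = &cpus[2];
        cpus[2].nocb_next_cb_rdp = NULL;

        for (i = 0; i < 3; i++)
                printf("cpu %d: GP work handled on behalf of cpu %d\n",
                       cpus[i].cpu, cpus[i].nocb_gp_rdp->cpu);
        return 0;
}
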
@@ -1543,47 +1521,47 @@ bool rcu_is_nocb_cpu(int cpu)
 }
 
 /*
- * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
+ * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
  * and this function releases it.
  */
-static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
-                              unsigned long flags)
+static void __wake_nocb_gp(struct rcu_data *rdp, bool force,
+                          unsigned long flags)
        __releases(rdp->nocb_lock)
 {
-       struct rcu_data *rdp_leader = rdp->nocb_leader;
+       struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 
        lockdep_assert_held(&rdp->nocb_lock);
-       if (!READ_ONCE(rdp_leader->nocb_kthread)) {
+       if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                return;
        }
-       if (rdp_leader->nocb_leader_sleep || force) {
+       if (rdp_gp->nocb_gp_sleep || force) {
                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
-               WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+               WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
                del_timer(&rdp->nocb_timer);
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-               smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
-               swake_up_one(&rdp_leader->nocb_wq);
+               smp_mb(); /* ->nocb_gp_sleep before swake_up_one(). */
+               swake_up_one(&rdp_gp->nocb_gp_wq);
        } else {
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        }
 }
 
 /*
- * Kick the leader kthread for this NOCB group, but caller has not
+ * Kick the GP kthread for this NOCB group, but caller has not
  * acquired locks.
  */
-static void wake_nocb_leader(struct rcu_data *rdp, bool force)
+static void wake_nocb_gp(struct rcu_data *rdp, bool force)
 {
        unsigned long flags;
 
        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-       __wake_nocb_leader(rdp, force, flags);
+       __wake_nocb_gp(rdp, force, flags);
 }
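
Both functions above follow the lock-handoff convention documented by __releases(rdp->nocb_lock): the callee always drops the lock that the caller acquired. A minimal pthread sketch of that contract, with a plain mutex and flag standing in for ->nocb_lock and ->nocb_gp_sleep and the actual swake_up_one() elided:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nocb_lock = PTHREAD_MUTEX_INITIALIZER;
static int gp_kthread_sleeping = 1;     /* stands in for ->nocb_gp_sleep */

/*
 * Called with nocb_lock held; always drops it before returning, so the
 * caller must not unlock again.  The actual wakeup is elided.
 */
static void wake_gp_locked(int force)
{
        if (gp_kthread_sleeping || force) {
                gp_kthread_sleeping = 0;
                pthread_mutex_unlock(&nocb_lock);
                /* wake the GP thread here, outside the lock */
                return;
        }
        pthread_mutex_unlock(&nocb_lock);
}

int main(void)
{
        pthread_mutex_lock(&nocb_lock);
        wake_gp_locked(0);      /* lock is released inside the callee */
        printf("gp_kthread_sleeping = %d\n", gp_kthread_sleeping);
        return 0;
}
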
 
 /*
- * Arrange to wake the leader kthread for this NOCB group at some
- * future time when it is safe to do so.
+ * Arrange to wake the GP kthread for this NOCB group at some future
+ * time when it is safe to do so.
  */
 static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
                                   const char *reason)
@@ -1626,10 +1604,10 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
        if (!rhp)
                rhp = READ_ONCE(rdp->nocb_gp_head);
        if (!rhp)
-               rhp = READ_ONCE(rdp->nocb_follower_head);
+               rhp = READ_ONCE(rdp->nocb_cb_head);
 
        /* Having no rcuo kthread but CBs after scheduler starts is bad! */
-       if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
+       if (!READ_ONCE(rdp->nocb_cb_kthread) && rhp &&
            rcu_scheduler_fully_active) {
                /* RCU callback enqueued before CPU first came online??? */
                pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
@@ -1668,7 +1646,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
        /* If we are not being polled and there is a kthread, awaken it ... */
-       t = READ_ONCE(rdp->nocb_kthread);
+       t = READ_ONCE(rdp->nocb_gp_kthread);
        if (rcu_nocb_poll || !t) {
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("WakeNotPoll"));
@@ -1678,7 +1656,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        if (old_rhpp == &rdp->nocb_head) {
                if (!irqs_disabled_flags(flags)) {
                        /* ... if queue was empty ... */
-                       wake_nocb_leader(rdp, false);
+                       wake_nocb_gp(rdp, false);
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeEmpty"));
                } else {
@@ -1689,7 +1667,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
                /* ... or if many callbacks queued. */
                if (!irqs_disabled_flags(flags)) {
-                       wake_nocb_leader(rdp, true);
+                       wake_nocb_gp(rdp, true);
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeOvf"));
                } else {
@@ -1805,10 +1783,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 }
 
 /*
- * Leaders come here to wait for additional callbacks to show up.
+ * No-CBs GP kthreads come here to wait for additional callbacks to show up.
  * This function does not return until callbacks appear.
  */
-static void nocb_leader_wait(struct rcu_data *my_rdp)
+static void nocb_gp_wait(struct rcu_data *my_rdp)
 {
        bool firsttime = true;
        unsigned long flags;
@@ -1816,15 +1794,13 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
        struct rcu_data *rdp;
        struct rcu_head **tail;
 
-wait_again:
-
        /* Wait for callbacks to appear. */
        if (!rcu_nocb_poll) {
                trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep"));
-               swait_event_interruptible_exclusive(my_rdp->nocb_wq,
-                               !READ_ONCE(my_rdp->nocb_leader_sleep));
+               swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
+                               !READ_ONCE(my_rdp->nocb_gp_sleep));
                raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
-               my_rdp->nocb_leader_sleep = true;
+               my_rdp->nocb_gp_sleep = true;
                WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
                del_timer(&my_rdp->nocb_timer);
                raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
@@ -1834,16 +1810,16 @@ wait_again:
        }
 
        /*
-        * Each pass through the following loop checks a follower for CBs.
-        * We are our own first follower.  Any CBs found are moved to
+        * Each pass through the following loop checks for CBs.
+        * We are our own first CB kthread.  Any CBs found are moved to
         * nocb_gp_head, where they await a grace period.
         */
        gotcbs = false;
        smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
-       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
                rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
                if (!rdp->nocb_gp_head)
-                       continue;  /* No CBs here, try next follower. */
+                       continue;  /* No CBs here, try next. */
 
                /* Move callbacks to wait-for-GP list, which is empty. */
                WRITE_ONCE(rdp->nocb_head, NULL);
@@ -1860,67 +1836,81 @@ wait_again:
                        trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu,
                                            TPS("WokeEmpty"));
                }
-               goto wait_again;
+               return;
        }
 
        /* Wait for one grace period. */
        rcu_nocb_wait_gp(my_rdp);
 
-       /* Each pass through the following loop wakes a follower, if needed. */
-       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+       /* Each pass through this loop wakes a CB kthread, if needed. */
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
                if (!rcu_nocb_poll &&
                    READ_ONCE(rdp->nocb_head) &&
-                   READ_ONCE(my_rdp->nocb_leader_sleep)) {
+                   READ_ONCE(my_rdp->nocb_gp_sleep)) {
                        raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
-                       my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
+                       my_rdp->nocb_gp_sleep = false;/* No need to sleep.*/
                        raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
                }
                if (!rdp->nocb_gp_head)
-                       continue; /* No CBs, so no need to wake follower. */
+                       continue; /* No CBs, so no need to wake kthread. */
 
-               /* Append callbacks to follower's "done" list. */
+               /* Append callbacks to CB kthread's "done" list. */
                raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-               tail = rdp->nocb_follower_tail;
-               rdp->nocb_follower_tail = rdp->nocb_gp_tail;
+               tail = rdp->nocb_cb_tail;
+               rdp->nocb_cb_tail = rdp->nocb_gp_tail;
                *tail = rdp->nocb_gp_head;
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-               if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
-                       /* List was empty, so wake up the follower.  */
-                       swake_up_one(&rdp->nocb_wq);
+               if (tail == &rdp->nocb_cb_head) {
+                       /* List was empty, so wake up the kthread.  */
+                       swake_up_one(&rdp->nocb_cb_wq);
                }
        }
+}
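
The callback hand-off in nocb_gp_wait() above relies on the head/indirect-tail list idiom: keeping a pointer to the last ->next field allows an O(1) splice, and comparing the old tail pointer against &head reveals whether the destination list was empty (and hence whether the CB kthread needs a wakeup). A self-contained userspace sketch of just that idiom, with invented struct names:

#include <stddef.h>
#include <stdio.h>

struct cb {
        struct cb *next;
        int id;
};

/* Minimal model of the ->nocb_cb_head / ->nocb_cb_tail pair. */
struct cb_list {
        struct cb *head;
        struct cb **tail;       /* points at head, or at the last ->next */
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

/* Splice src onto dst in O(1), as nocb_gp_wait() does with nocb_gp_head. */
static void cb_list_splice(struct cb_list *dst, struct cb *src_head,
                           struct cb **src_tail)
{
        struct cb **old_tail = dst->tail;

        dst->tail = src_tail;
        *old_tail = src_head;
        if (old_tail == &dst->head)
                printf("destination was empty: wake the CB kthread\n");
}

int main(void)
{
        struct cb a = { .id = 1 }, b = { .next = NULL, .id = 2 };
        struct cb_list done;
        struct cb *p;

        a.next = &b;
        cb_list_init(&done);
        cb_list_splice(&done, &a, &b.next);
        for (p = done.head; p; p = p->next)
                printf("cb %d ready to invoke\n", p->id);
        return 0;
}
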
 
-       /* If we (the leader) don't have CBs, go wait some more. */
-       if (!my_rdp->nocb_follower_head)
-               goto wait_again;
+/*
+ * No-CBs grace-period-wait kthread.  There is one of these per group
+ * of CPUs, but only once at least one CPU in that group has come online
+ * at least once since boot.  This kthread checks for newly posted
+ * callbacks from any of the CPUs it is responsible for, waits for a
+ * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
+ * that then have callback-invocation work to do.
+ */
+static int rcu_nocb_gp_kthread(void *arg)
+{
+       struct rcu_data *rdp = arg;
+
+       for (;;)
+               nocb_gp_wait(rdp);
+       return 0;
 }
 
 /*
- * Followers come here to wait for additional callbacks to show up.
- * This function does not return until callbacks appear.
+ * No-CBs CB kthreads come here to wait for additional callbacks to show up.
+ * This function returns true ("keep waiting") until callbacks appear and
+ * then false ("stop waiting") when callbacks finally do appear.
  */
-static void nocb_follower_wait(struct rcu_data *rdp)
+static bool nocb_cb_wait(struct rcu_data *rdp)
 {
-       for (;;) {
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
-               swait_event_interruptible_exclusive(rdp->nocb_wq,
-                                        READ_ONCE(rdp->nocb_follower_head));
-               if (smp_load_acquire(&rdp->nocb_follower_head)) {
-                       /* ^^^ Ensure CB invocation follows _head test. */
-                       return;
-               }
-               WARN_ON(signal_pending(current));
-               trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
+       swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+                                READ_ONCE(rdp->nocb_cb_head));
+       if (smp_load_acquire(&rdp->nocb_cb_head)) { /* VVV */
+               /* ^^^ Ensure CB invocation follows _head test. */
+               return false;
        }
+       WARN_ON(signal_pending(current));
+       trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+       return true;
 }
 
 /*
  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
- * callbacks queued by the corresponding no-CBs CPU, however, there is
- * an optional leader-follower relationship so that the grace-period
- * kthreads don't have to do quite so many wakeups.
+ * callbacks queued by the corresponding no-CBs CPU; however, there is an
+ * optional GP-CB relationship so that the grace-period kthreads don't
+ * have to do quite so many wakeups (as in they only need to wake the
+ * no-CBs GP kthreads, not the CB kthreads).
  */
-static int rcu_nocb_kthread(void *arg)
+static int rcu_nocb_cb_kthread(void *arg)
 {
        int c, cl;
        unsigned long flags;
@@ -1932,17 +1922,15 @@ static int rcu_nocb_kthread(void *arg)
        /* Each pass through this loop invokes one batch of callbacks */
        for (;;) {
                /* Wait for callbacks. */
-               if (rdp->nocb_leader == rdp)
-                       nocb_leader_wait(rdp);
-               else
-                       nocb_follower_wait(rdp);
+               while (nocb_cb_wait(rdp))
+                       continue;
 
                /* Pull the ready-to-invoke callbacks onto local list. */
                raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-               list = rdp->nocb_follower_head;
-               rdp->nocb_follower_head = NULL;
-               tail = rdp->nocb_follower_tail;
-               rdp->nocb_follower_tail = &rdp->nocb_follower_head;
+               list = rdp->nocb_cb_head;
+               rdp->nocb_cb_head = NULL;
+               tail = rdp->nocb_cb_tail;
+               rdp->nocb_cb_tail = &rdp->nocb_cb_head;
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                if (WARN_ON_ONCE(!list))
                        continue;
@@ -2000,7 +1988,7 @@ static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
        }
        ndw = READ_ONCE(rdp->nocb_defer_wakeup);
        WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
-       __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+       __wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
 }
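
The deferred-wakeup path above records a wakeup that could not be issued safely (see wake_nocb_leader_defer()) and replays it later through __wake_nocb_gp(), forcing the wakeup only for RCU_NOCB_WAKE_FORCE. A minimal userspace model of that record-and-replay pattern, using only the two wake levels visible in this patch (their numeric values here are not the kernel's) and eliding the ->nocb_timer that normally triggers the replay:

#include <stdbool.h>
#include <stdio.h>

/* Only the two levels visible in this patch; the kernel defines more. */
enum { RCU_NOCB_WAKE_NOT, RCU_NOCB_WAKE_FORCE };

static int nocb_defer_wakeup = RCU_NOCB_WAKE_NOT;

/* Stand-in for __wake_nocb_gp(); just reports what would happen. */
static void wake_gp(bool force)
{
        printf("wake GP kthread%s\n", force ? " (forced)" : "");
}

/* Record a wakeup requested while it was unsafe to issue one (irqs disabled). */
static void defer_wakeup(int level)
{
        if (level > nocb_defer_wakeup)
                nocb_defer_wakeup = level;
        /* the real code also arms ->nocb_timer to trigger the replay */
}

/* Replay the recorded wakeup later, e.g. from the timer handler. */
static void do_deferred_wakeup(void)
{
        int ndw = nocb_defer_wakeup;

        if (ndw == RCU_NOCB_WAKE_NOT)
                return;
        nocb_defer_wakeup = RCU_NOCB_WAKE_NOT;
        wake_gp(ndw == RCU_NOCB_WAKE_FORCE);
}

int main(void)
{
        defer_wakeup(RCU_NOCB_WAKE_FORCE);
        do_deferred_wakeup();
        return 0;
}
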
 
@@ -2069,58 +2057,48 @@ void __init rcu_init_nohz(void)
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
        rdp->nocb_tail = &rdp->nocb_head;
-       init_swait_queue_head(&rdp->nocb_wq);
-       rdp->nocb_follower_tail = &rdp->nocb_follower_head;
+       init_swait_queue_head(&rdp->nocb_cb_wq);
+       init_swait_queue_head(&rdp->nocb_gp_wq);
+       rdp->nocb_cb_tail = &rdp->nocb_cb_head;
        raw_spin_lock_init(&rdp->nocb_lock);
        timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
 }
 
 /*
  * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread, spawn it.  If the CPUs are brought online out of order,
- * this can require re-organizing the leader-follower relationships.
+ * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
+ * for this CPU's group has not yet been created, spawn it as well.
  */
 static void rcu_spawn_one_nocb_kthread(int cpu)
 {
-       struct rcu_data *rdp;
-       struct rcu_data *rdp_last;
-       struct rcu_data *rdp_old_leader;
-       struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu);
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       struct rcu_data *rdp_gp;
        struct task_struct *t;
 
        /*
         * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
         * then nothing to do.
         */
-       if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+       if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
                return;
 
-       /* If we didn't spawn the leader first, reorganize! */
-       rdp_old_leader = rdp_spawn->nocb_leader;
-       if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
-               rdp_last = NULL;
-               rdp = rdp_old_leader;
-               do {
-                       rdp->nocb_leader = rdp_spawn;
-                       if (rdp_last && rdp != rdp_spawn)
-                               rdp_last->nocb_next_follower = rdp;
-                       if (rdp == rdp_spawn) {
-                               rdp = rdp->nocb_next_follower;
-                       } else {
-                               rdp_last = rdp;
-                               rdp = rdp->nocb_next_follower;
-                               rdp_last->nocb_next_follower = NULL;
-                       }
-               } while (rdp);
-               rdp_spawn->nocb_next_follower = rdp_old_leader;
+       /* If we didn't spawn the GP kthread first, reorganize! */
+       rdp_gp = rdp->nocb_gp_rdp;
+       if (!rdp_gp->nocb_gp_kthread) {
+               t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
+                               "rcuog/%d", rdp_gp->cpu);
+               if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
+                       return;
+               WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
        }
 
        /* Spawn the kthread for this CPU. */
-       t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+       t = kthread_run(rcu_nocb_cb_kthread, rdp,
                        "rcuo%c/%d", rcu_state.abbr, cpu);
-       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
+       if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
                return;
-       WRITE_ONCE(rdp_spawn->nocb_kthread, t);
+       WRITE_ONCE(rdp->nocb_cb_kthread, t);
+       WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
 
 /*
@@ -2147,18 +2125,18 @@ static void __init rcu_spawn_nocb_kthreads(void)
                rcu_spawn_cpu_nocb_kthread(cpu);
 }
 
-/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
+/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_leader_stride = -1;
 module_param(rcu_nocb_leader_stride, int, 0444);
 
 /*
- * Initialize leader-follower relationships for all no-CBs CPU.
+ * Initialize GP-CB relationships for all no-CBs CPUs.
  */
 static void __init rcu_organize_nocb_kthreads(void)
 {
        int cpu;
        int ls = rcu_nocb_leader_stride;
-       int nl = 0;  /* Next leader. */
+       int nl = 0;  /* Next GP kthread. */
        struct rcu_data *rdp;
        struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
        struct rcu_data *rdp_prev = NULL;
@@ -2178,14 +2156,14 @@ static void __init rcu_organize_nocb_kthreads(void)
        for_each_cpu(cpu, rcu_nocb_mask) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rdp->cpu >= nl) {
-                       /* New leader, set up for followers & next leader. */
+                       /* New GP kthread, set up for CBs & next GP. */
                        nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
-                       rdp->nocb_leader = rdp;
+                       rdp->nocb_gp_rdp = rdp;
                        rdp_leader = rdp;
                } else {
-                       /* Another follower, link to previous leader. */
-                       rdp->nocb_leader = rdp_leader;
-                       rdp_prev->nocb_next_follower = rdp;
+                       /* Another CB kthread, link to previous GP kthread. */
+                       rdp->nocb_gp_rdp = rdp_leader;
+                       rdp_prev->nocb_next_cb_rdp = rdp;
                }
                rdp_prev = rdp;
        }
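
Given the comment above (default stride of -1 mapping to sqrt(nr_cpu_ids)) and the DIV_ROUND_UP() logic in the loop, the GP/CB grouping can be previewed with a small userspace program; it assumes every CPU is in rcu_nocb_mask and uses sqrt() in place of the kernel's int_sqrt():

/* build: cc stride.c -lm */
#include <math.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int nr_cpu_ids = 16;                    /* example CPU count */
        int ls = (int)sqrt(nr_cpu_ids);         /* default stride, per the comment above */
        int nl = 0;                             /* first CPU of the next group */
        int gp_cpu = -1;
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                if (cpu >= nl) {
                        /* This CPU's rdp leads a new group. */
                        nl = DIV_ROUND_UP(cpu + 1, ls) * ls;
                        gp_cpu = cpu;
                }
                printf("cpu %2d -> rcuog group led by cpu %d\n", cpu, gp_cpu);
        }
        return 0;
}
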