/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 * Copyright (C) 2010 Linaro
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.ibm.com>
 *         Frederic Weisbecker <frederic@kernel.org>
 */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
        return lockdep_is_held(&rdp->nocb_lock);
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
        /* Race on early boot between thread creation and assignment */
        if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
                return true;

        if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
                if (in_task())
                        return true;
        return false;
}

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait for
 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
 * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */

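/*
 * Illustrative usage (not part of the original file): offloading is
 * transparent to callers.  Code running on a CPU in rcu_nocb_mask still
 * posts callbacks the usual way, for instance with a hypothetical
 * reclaim function:
 *
 *      call_rcu(&p->rh, my_reclaim_func);
 *
 * The difference is only where the callback runs: it is pulled and
 * invoked by the rcuo kthreads instead of this CPU's softirq context.
 */
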
/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static bool rcu_nocb_is_setup;

static int __init rcu_nocb_setup(char *str)
{
        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
        if (*str == '=') {
                if (cpulist_parse(++str, rcu_nocb_mask)) {
                        pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
                        cpumask_setall(rcu_nocb_mask);
                }
        }
        rcu_nocb_is_setup = true;
        return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);

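/*
 * Illustrative boot lines (assumed syntax, per the parser above):
 *
 *      rcu_nocbs=0,4-7         offload CPUs 0 and 4-7
 *      rcu_nocbs               no list: just set rcu_nocb_is_setup so that
 *                              rcu_init_nohz() can add nohz_full CPUs to
 *                              the mask
 *      rcu_nocbs=garbage       warns, then offloads all CPUs
 */
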
static int __init parse_rcu_nocb_poll(char *arg)
{
        rcu_nocb_poll = true;
        return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which can only happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);

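/*
 * Worked example: with HZ=1000 the limit is 16 * 1000 / 1000 = 16 direct
 * enqueues per jiffy, and with HZ=250 it is 16 * 1000 / 250 = 64, so the
 * budget is roughly 16000 direct call_rcu() enqueues per second either way.
 */
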
/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, increment ->nocb_lock_contended to
 * flag the contention.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
        __acquires(&rdp->nocb_bypass_lock)
{
        lockdep_assert_irqs_disabled();
        if (raw_spin_trylock(&rdp->nocb_bypass_lock))
                return;
        atomic_inc(&rdp->nocb_lock_contended);
        WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
        smp_mb__after_atomic(); /* atomic_inc() before lock. */
        raw_spin_lock(&rdp->nocb_bypass_lock);
        smp_mb__before_atomic(); /* atomic_dec() after lock. */
        atomic_dec(&rdp->nocb_lock_contended);
}

/*
 * Spinwait until the specified rcu_data structure's ->nocb_lock is
 * not contended.  Please note that this is extremely special-purpose,
 * relying on the fact that at most two kthreads and one CPU contend for
 * this lock, and also that the two kthreads are guaranteed to have frequent
 * grace-period-duration time intervals between successive acquisitions
 * of the lock.  This allows us to use an extremely simple throttling
 * mechanism, and further to apply it only to the CPU doing floods of
 * call_rcu() invocations.  Don't try this at home!
 */
static void rcu_nocb_wait_contended(struct rcu_data *rdp)
{
        WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
        while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
                cpu_relax();
}

/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
        lockdep_assert_irqs_disabled();
        return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
        __releases(&rdp->nocb_bypass_lock)
{
        lockdep_assert_irqs_disabled();
        raw_spin_unlock(&rdp->nocb_bypass_lock);
}

/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
        lockdep_assert_irqs_disabled();
        if (!rcu_rdp_is_offloaded(rdp))
                return;
        raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
        if (rcu_rdp_is_offloaded(rdp)) {
                lockdep_assert_irqs_disabled();
                raw_spin_unlock(&rdp->nocb_lock);
        }
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
                                       unsigned long flags)
{
        if (rcu_rdp_is_offloaded(rdp)) {
                lockdep_assert_irqs_disabled();
                raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
        } else {
                local_irq_restore(flags);
        }
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
        lockdep_assert_irqs_disabled();
        if (rcu_rdp_is_offloaded(rdp))
                lockdep_assert_held(&rdp->nocb_lock);
}

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
        swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
        return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

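/*
 * Illustrative note: rcu_seq_ctr() discards the low-order state bits of
 * ->gp_seq, so the "& 0x1" above alternates the two wait queues between
 * consecutive grace periods.  For example, counter values N and N + 1
 * select nocb_gp_wq[0] and nocb_gp_wq[1] (or vice versa), keeping wakeups
 * for a just-ended grace period away from sleepers awaiting the next one.
 */
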
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
        init_swait_queue_head(&rnp->nocb_gp_wq[0]);
        init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
        if (cpumask_available(rcu_nocb_mask))
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
        return false;
}

static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
                           struct rcu_data *rdp,
                           bool force, unsigned long flags)
        __releases(rdp_gp->nocb_gp_lock)
{
        bool needwake = false;

        if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
                raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("AlreadyAwake"));
                return false;
        }

        if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
                WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
                del_timer(&rdp_gp->nocb_timer);
        }

        if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
                WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
                needwake = true;
        }
        raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
        if (needwake) {
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
                wake_up_process(rdp_gp->nocb_gp_kthread);
        }

        return needwake;
}

/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
        unsigned long flags;
        struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

        raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
        return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}

/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
                               const char *reason)
{
        unsigned long flags;
        struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

        raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

        /*
         * Bypass wakeup overrides previous deferments. In case
         * of callback storm, no need to wake up too early.
         */
        if (waketype == RCU_NOCB_WAKE_BYPASS) {
                mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
                WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
        } else {
                if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
                        mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
                if (rdp_gp->nocb_defer_wakeup < waketype)
                        WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
        }

        raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}

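/*
 * Illustrative timeline (assuming the usual RCU_NOCB_WAKE_NOT <
 * RCU_NOCB_WAKE_BYPASS < RCU_NOCB_WAKE < RCU_NOCB_WAKE_FORCE ordering):
 * a bypass deferral rearms ->nocb_timer for jiffies + 2 and overwrites
 * any previous deferral level, while a non-bypass deferral arms a
 * jiffies + 1 timer only if the current level is below RCU_NOCB_WAKE
 * and raises ->nocb_defer_wakeup only if the new level is stronger.
 */
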
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                     unsigned long j)
{
        struct rcu_cblist rcl;

        WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
        rcu_lockdep_assert_cblist_protected(rdp);
        lockdep_assert_held(&rdp->nocb_bypass_lock);
        if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
                raw_spin_unlock(&rdp->nocb_bypass_lock);
                return false;
        }
        /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
        if (rhp)
                rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
        rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
        rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
        WRITE_ONCE(rdp->nocb_bypass_first, j);
        rcu_nocb_bypass_unlock(rdp);
        return true;
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                  unsigned long j)
{
        if (!rcu_rdp_is_offloaded(rdp))
                return true;
        rcu_lockdep_assert_cblist_protected(rdp);
        rcu_nocb_bypass_lock(rdp);
        return rcu_nocb_do_flush_bypass(rdp, rhp, j);
}

/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
        rcu_lockdep_assert_cblist_protected(rdp);
        if (!rcu_rdp_is_offloaded(rdp) ||
            !rcu_nocb_bypass_trylock(rdp))
                return;
        WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
}

/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                bool *was_alldone, unsigned long flags)
{
        unsigned long c;
        unsigned long cur_gp_seq;
        unsigned long j = jiffies;
        long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);

        lockdep_assert_irqs_disabled();

        // Pure softirq/rcuc based processing: no bypassing, no
        // locking.
        if (!rcu_rdp_is_offloaded(rdp)) {
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                return false;
        }

        // In the process of (de-)offloading: no bypassing, but
        // locking.
        if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
                rcu_nocb_lock(rdp);
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                return false; /* Not offloaded, no bypassing. */
        }

        // Don't use ->nocb_bypass during early boot.
        if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
                rcu_nocb_lock(rdp);
                WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                return false;
        }

        // If we have advanced to a new jiffy, reset counts to allow
        // moving back from ->nocb_bypass to ->cblist.
        if (j == rdp->nocb_nobypass_last) {
                c = rdp->nocb_nobypass_count + 1;
        } else {
                WRITE_ONCE(rdp->nocb_nobypass_last, j);
                c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
                if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
                                 nocb_nobypass_lim_per_jiffy))
                        c = 0;
                else if (c > nocb_nobypass_lim_per_jiffy)
                        c = nocb_nobypass_lim_per_jiffy;
        }
        WRITE_ONCE(rdp->nocb_nobypass_count, c);

        // If there hasn't yet been all that many ->cblist enqueues
        // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
        // ->nocb_bypass first.
        if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
                rcu_nocb_lock(rdp);
                *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                if (*was_alldone)
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("FirstQ"));
                WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
                WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
                return false; // Caller must enqueue the callback.
        }

        // If ->nocb_bypass has been used too long or is too full,
        // flush ->nocb_bypass to ->cblist.
        if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
            ncbs >= qhimark) {
                rcu_nocb_lock(rdp);
                if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
                        *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
                        if (*was_alldone)
                                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                                    TPS("FirstQ"));
                        WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
                        return false; // Caller must enqueue the callback.
                }
                if (j != rdp->nocb_gp_adv_time &&
                    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
                    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
                        rcu_advance_cbs_nowake(rdp->mynode, rdp);
                        rdp->nocb_gp_adv_time = j;
                }
                rcu_nocb_unlock_irqrestore(rdp, flags);
                return true; // Callback already enqueued.
        }

        // We need to use the bypass.
        rcu_nocb_wait_contended(rdp);
        rcu_nocb_bypass_lock(rdp);
        ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
        rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
        rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
        if (!ncbs) {
                WRITE_ONCE(rdp->nocb_bypass_first, j);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
        }
        rcu_nocb_bypass_unlock(rdp);
        smp_mb(); /* Order enqueue before wake. */
        if (ncbs) {
                local_irq_restore(flags);
        } else {
                // No-CBs GP kthread might be indefinitely asleep, if so, wake.
                rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
                if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("FirstBQwake"));
                        __call_rcu_nocb_wake(rdp, true, flags);
                } else {
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("FirstBQnoWake"));
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                }
        }
        return true; // Callback already enqueued.
}

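/*
 * Illustrative flood scenario (assuming HZ=1000 and the default qhimark
 * of 10000): the first 16 call_rcu() invocations within a given jiffy are
 * enqueued directly onto ->cblist, invocation 17 and later land in
 * ->nocb_bypass, and the bypass is flushed back into ->cblist as soon as
 * a new jiffy begins or 10000 bypass callbacks accumulate.
 */
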
/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                                 unsigned long flags)
                                 __releases(rdp->nocb_lock)
{
        unsigned long cur_gp_seq;
        unsigned long j;
        long len;
        struct task_struct *t;

        // If we are being polled or there is no kthread, just leave.
        t = READ_ONCE(rdp->nocb_gp_kthread);
        if (rcu_nocb_poll || !t) {
                rcu_nocb_unlock_irqrestore(rdp, flags);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                    TPS("WakeNotPoll"));
                return;
        }
        // Need to actually do a wakeup.
        len = rcu_segcblist_n_cbs(&rdp->cblist);
        if (was_alldone) {
                rdp->qlen_last_fqs_check = len;
                if (!irqs_disabled_flags(flags)) {
                        /* ... if queue was empty ... */
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        wake_nocb_gp(rdp, false);
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("WakeEmpty"));
                } else {
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
                                           TPS("WakeEmptyIsDeferred"));
                }
        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
                /* ... or if many callbacks queued. */
                rdp->qlen_last_fqs_check = len;
                j = jiffies;
                if (j != rdp->nocb_gp_adv_time &&
                    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
                    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
                        rcu_advance_cbs_nowake(rdp->mynode, rdp);
                        rdp->nocb_gp_adv_time = j;
                }
                smp_mb(); /* Enqueue before timer_pending(). */
                if ((rdp->nocb_cb_sleep ||
                     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
                    !timer_pending(&rdp->nocb_timer)) {
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
                                           TPS("WakeOvfIsDeferred"));
                } else {
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
                }
        } else {
                rcu_nocb_unlock_irqrestore(rdp, flags);
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
        }
}

/*
 * Check if we ignore this rdp.
 *
 * We check that without holding the nocb lock, but
 * we make sure not to miss a freshly offloaded rdp
 * with the current ordering:
 *
 *  rdp_offload_toggle()           nocb_gp_enabled_cb()
 * -------------------------      ----------------------------
 *    WRITE flags                    LOCK nocb_gp_lock
 *    LOCK nocb_gp_lock              READ/WRITE nocb_gp_sleep
 *    READ/WRITE nocb_gp_sleep       UNLOCK nocb_gp_lock
 *    UNLOCK nocb_gp_lock            READ flags
 */
static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
{
        u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;

        return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
                                                     bool *needwake_state)
{
        struct rcu_segcblist *cblist = &rdp->cblist;

        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
                        rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
                        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
                                *needwake_state = true;
                }
                return false;
        }

        /*
         * De-offloading. Clear our flag and notify the de-offload worker.
         * We will ignore this rdp until it gets re-offloaded.
         */
        WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
        rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
        if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
                *needwake_state = true;
        return true;
}

/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show up
 * or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
        bool bypass = false;
        long bypass_ncbs;
        int __maybe_unused cpu = my_rdp->cpu;
        unsigned long cur_gp_seq;
        unsigned long flags;
        bool gotcbs = false;
        unsigned long j = jiffies;
        bool needwait_gp = false; // This prevents actual uninitialized use.
        bool needwake;
        bool needwake_gp;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
        bool wasempty = false;

        /*
         * Each pass through the following loop checks for CBs and for the
         * nearest grace period (if any) to wait for next.  The CB kthreads
         * and the global grace-period kthread are awakened if needed.
         */
        WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
        /*
         * An rcu_data structure is removed from the list after its
         * CPU is de-offloaded and added to the list before that CPU is
         * (re-)offloaded.  If the following loop happens to be referencing
         * that rcu_data structure during the time that the corresponding
         * CPU is de-offloaded and then immediately re-offloaded, this
         * loop's rdp pointer will be carried to the end of the list by
         * the resulting pair of list operations.  This can cause the loop
         * to skip over some of the rcu_data structures that were supposed
         * to have been scanned.  Fortunately a new iteration through the
         * entire loop is forced after a given CPU's rcu_data structure
         * is added to the list, so the skipped-over rcu_data structures
         * won't be ignored for long.
         */
        list_for_each_entry_rcu(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp, 1) {
                bool needwake_state = false;

                if (!nocb_gp_enabled_cb(rdp))
                        continue;
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
                rcu_nocb_lock_irqsave(rdp, flags);
                if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        if (needwake_state)
                                swake_up_one(&rdp->nocb_state_wq);
                        continue;
                }
                bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
                if (bypass_ncbs &&
                    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
                     bypass_ncbs > 2 * qhimark)) {
                        // Bypass full or old, so flush it.
                        (void)rcu_nocb_try_flush_bypass(rdp, j);
                        bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
                } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        if (needwake_state)
                                swake_up_one(&rdp->nocb_state_wq);
                        continue; /* No callbacks here, try next. */
                }
                if (bypass_ncbs) {
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("Bypass"));
                        bypass = true;
                }
                rnp = rdp->mynode;

                // Advance callbacks if helpful and low contention.
                needwake_gp = false;
                if (!rcu_segcblist_restempty(&rdp->cblist,
                                             RCU_NEXT_READY_TAIL) ||
                    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
                     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
                        raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
                        needwake_gp = rcu_advance_cbs(rnp, rdp);
                        wasempty = rcu_segcblist_restempty(&rdp->cblist,
                                                           RCU_NEXT_READY_TAIL);
                        raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
                }
                // Need to wait on some grace period?
                WARN_ON_ONCE(wasempty &&
                             !rcu_segcblist_restempty(&rdp->cblist,
                                                      RCU_NEXT_READY_TAIL));
                if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
                        if (!needwait_gp ||
                            ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
                                wait_gp_seq = cur_gp_seq;
                        needwait_gp = true;
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
                                            TPS("NeedWaitGP"));
                }
                if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
                        needwake = rdp->nocb_cb_sleep;
                        WRITE_ONCE(rdp->nocb_cb_sleep, false);
                        smp_mb(); /* CB invocation -after- GP end. */
                } else {
                        needwake = false;
                }
                rcu_nocb_unlock_irqrestore(rdp, flags);
                if (needwake) {
                        swake_up_one(&rdp->nocb_cb_wq);
                        gotcbs = true;
                }
                if (needwake_gp)
                        rcu_gp_kthread_wake();
                if (needwake_state)
                        swake_up_one(&rdp->nocb_state_wq);
        }

        my_rdp->nocb_gp_bypass = bypass;
        my_rdp->nocb_gp_gp = needwait_gp;
        my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

        if (bypass && !rcu_nocb_poll) {
                // At least one child with non-empty ->nocb_bypass, so set
                // timer in order to avoid stranding its callbacks.
                wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
                                   TPS("WakeBypassIsDeferred"));
        }
        if (rcu_nocb_poll) {
                /* Polling, so trace if first poll in the series. */
                if (gotcbs)
                        trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
                schedule_timeout_idle(1);
        } else if (!needwait_gp) {
                /* Wait for callbacks to appear. */
                trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
                swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
                                !READ_ONCE(my_rdp->nocb_gp_sleep));
                trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
        } else {
                rnp = my_rdp->mynode;
                trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
                swait_event_interruptible_exclusive(
                        rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
                        rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
                        !READ_ONCE(my_rdp->nocb_gp_sleep));
                trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
        }
        if (!rcu_nocb_poll) {
                raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
                if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
                        WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
                        del_timer(&my_rdp->nocb_timer);
                }
                WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
                raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
        }
        my_rdp->nocb_gp_seq = -1;
        WARN_ON(signal_pending(current));
}

/*
 * No-CBs grace-period-wait kthread.  There is one of these per group
 * of CPUs, but only once at least one CPU in that group has come online
 * at least once since boot.  This kthread checks for newly posted
 * callbacks from any of the CPUs it is responsible for, waits for a
 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
 * that then have callback-invocation work to do.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
        struct rcu_data *rdp = arg;

        for (;;) {
                WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
                nocb_gp_wait(rdp);
                cond_resched_tasks_rcu_qs();
        }
        return 0;
}

static inline bool nocb_cb_can_run(struct rcu_data *rdp)
{
        u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;

        return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
        return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
}

/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
        struct rcu_segcblist *cblist = &rdp->cblist;
        unsigned long cur_gp_seq;
        unsigned long flags;
        bool needwake_state = false;
        bool needwake_gp = false;
        bool can_sleep = true;
        struct rcu_node *rnp = rdp->mynode;

        do {
                swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
                                                    nocb_cb_wait_cond(rdp));

                // VVV Ensure CB invocation follows _sleep test.
                if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
                        WARN_ON(signal_pending(current));
                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
                }
        } while (!nocb_cb_can_run(rdp));

        local_irq_save(flags);
        rcu_momentary_dyntick_idle();
        local_irq_restore(flags);
        /*
         * Disable BH to provide the expected environment.  Also, when
         * transitioning to/from NOCB mode, a self-requeuing callback might
         * be invoked from softirq.  A short grace period could cause both
         * instances of this callback to execute concurrently.
         */
        local_bh_disable();
        rcu_do_batch(rdp);
        local_bh_enable();
        lockdep_assert_irqs_enabled();
        rcu_nocb_lock_irqsave(rdp, flags);
        if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
            rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
            raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
                needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
        }

        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
                        rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
                        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
                                needwake_state = true;
                }
                if (rcu_segcblist_ready_cbs(cblist))
                        can_sleep = false;
        } else {
                /*
                 * De-offloading. Clear our flag and notify the de-offload worker.
                 * We won't touch the callbacks and keep sleeping until we
                 * get re-offloaded.
                 */
                WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
                rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
                        needwake_state = true;
        }

        WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);

        if (rdp->nocb_cb_sleep)
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));

        rcu_nocb_unlock_irqrestore(rdp, flags);
        if (needwake_gp)
                rcu_gp_kthread_wake();

        if (needwake_state)
                swake_up_one(&rdp->nocb_state_wq);
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
        struct rcu_data *rdp = arg;

        // Each pass through this loop does one callback batch, and,
        // if there are no more ready callbacks, waits for them.
        for (;;) {
                nocb_cb_wait(rdp);
                cond_resched_tasks_rcu_qs();
        }
        return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
        return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
                                           struct rcu_data *rdp, int level,
                                           unsigned long flags)
        __releases(rdp_gp->nocb_gp_lock)
{
        int ndw;
        int ret;

        if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
                raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
                return false;
        }

        ndw = rdp_gp->nocb_defer_wakeup;
        ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

        return ret;
}

/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
        unsigned long flags;
        struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

        WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

        raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
        smp_mb__after_spinlock(); /* Timer expire before wakeup. */
        do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}

/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

        if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
                return false;

        raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
        return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
        do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);

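/*
 * Usage note (assuming the current idle-path caller): do_idle() invokes
 * rcu_nocb_flush_deferred_wakeup() before a CPU goes idle, so that a
 * wakeup deferred from call_rcu()'s irq-disabled fastpath is not left
 * pending across the idle period.
 */
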
static int rdp_offload_toggle(struct rcu_data *rdp,
                              bool offload, unsigned long flags)
        __releases(rdp->nocb_lock)
{
        struct rcu_segcblist *cblist = &rdp->cblist;
        struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
        bool wake_gp = false;

        rcu_segcblist_offload(cblist, offload);

        if (rdp->nocb_cb_sleep)
                rdp->nocb_cb_sleep = false;
        rcu_nocb_unlock_irqrestore(rdp, flags);

        /*
         * Ignore former value of nocb_cb_sleep and force wake up as it could
         * have been spuriously set to false already.
         */
        swake_up_one(&rdp->nocb_cb_wq);

        raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
        if (rdp_gp->nocb_gp_sleep) {
                rdp_gp->nocb_gp_sleep = false;
                wake_gp = true;
        }
        raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

        if (wake_gp)
                wake_up_process(rdp_gp->nocb_gp_kthread);

        return 0;
}

static long rcu_nocb_rdp_deoffload(void *arg)
{
        struct rcu_data *rdp = arg;
        struct rcu_segcblist *cblist = &rdp->cblist;
        unsigned long flags;
        int ret;

        WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());

        pr_info("De-offloading %d\n", rdp->cpu);

        rcu_nocb_lock_irqsave(rdp, flags);
        /*
         * Flush once and for all now.  This suffices because we are
         * running on the target CPU holding ->nocb_lock (thus having
         * interrupts disabled), and because rdp_offload_toggle()
         * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
         * Thus future calls to rcu_segcblist_completely_offloaded() will
         * return false, which means that future calls to rcu_nocb_try_bypass()
         * will refuse to put anything into the bypass.
         */
        WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
        /*
         * Start with invoking rcu_core() early.  This way, if the current
         * thread happens to preempt an ongoing call to rcu_core() in the
         * middle, leaving some work dismissed because rcu_core() still thinks
         * the rdp is completely offloaded, we are guaranteed a nearby future
         * instance of rcu_core() to catch up.
         */
        rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
        invoke_rcu_core();
        ret = rdp_offload_toggle(rdp, false, flags);
        swait_event_exclusive(rdp->nocb_state_wq,
                              !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
                                                        SEGCBLIST_KTHREAD_GP));
        /* Stop nocb_gp_wait() from iterating over this structure. */
        list_del_rcu(&rdp->nocb_entry_rdp);
        /*
         * Lock one last time to acquire latest callback updates from kthreads
         * so we can later handle callbacks locally without locking.
         */
        rcu_nocb_lock_irqsave(rdp, flags);
        /*
         * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
         * lock is released, but how about being paranoid for once?
         */
        rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
        /*
         * Without SEGCBLIST_LOCKING, we can't use
         * rcu_nocb_unlock_irqrestore() anymore.
         */
        raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

        /* Sanity check */
        WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));

        return ret;
}

int rcu_nocb_cpu_deoffload(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int ret = 0;

        mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
        if (rcu_rdp_is_offloaded(rdp)) {
                if (cpu_online(cpu)) {
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
                        if (!ret)
                                cpumask_clear_cpu(cpu, rcu_nocb_mask);
                } else {
                        pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
                        ret = -EINVAL;
                }
        }
        cpus_read_unlock();
        mutex_unlock(&rcu_state.barrier_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);

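/*
 * Illustrative (hypothetical) caller, in the style of rcutorture's
 * runtime toggling:
 *
 *      int ret = rcu_nocb_cpu_deoffload(3);
 *
 *      if (ret)
 *              pr_err("De-offload of CPU 3 failed: %d\n", ret);
 *
 * The ->barrier_mutex acquired above also serializes this operation
 * against rcu_barrier() and against concurrent (de-)offloads.
 */
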
static long rcu_nocb_rdp_offload(void *arg)
{
        struct rcu_data *rdp = arg;
        struct rcu_segcblist *cblist = &rdp->cblist;
        unsigned long flags;
        int ret;

        WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
        /*
         * For now we only support re-offloading, i.e., the rdp must have
         * been offloaded on boot first.
         */
        if (!rdp->nocb_gp_rdp)
                return -EINVAL;

        pr_info("Offloading %d\n", rdp->cpu);

        /*
         * Cause future nocb_gp_wait() invocations to iterate over this
         * structure, resetting ->nocb_gp_sleep and waking up the related
         * "rcuog".  Since nocb_gp_wait() in turn locks ->nocb_gp_lock
         * before setting ->nocb_gp_sleep again, we are guaranteed to
         * iterate this newly added structure before "rcuog" goes to
         * sleep again.
         */
        list_add_tail_rcu(&rdp->nocb_entry_rdp, &rdp->nocb_gp_rdp->nocb_head_rdp);

        /*
         * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
         * is set.
         */
        raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

        /*
         * We didn't take the nocb lock while working on the
         * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
         * Every modification done previously on rdp->cblist must therefore
         * be visible remotely to the nocb kthreads
         * upon wake up after reading the cblist flags.
         *
         * The layout against nocb_lock enforces that ordering:
         *
         *  __rcu_nocb_rdp_offload()       nocb_cb_wait()/nocb_gp_wait()
         * -------------------------      ----------------------------
         *      WRITE callbacks               rcu_nocb_lock()
         *      rcu_nocb_lock()               READ flags
         *      WRITE flags                   READ callbacks
         *      rcu_nocb_unlock()             rcu_nocb_unlock()
         */
        ret = rdp_offload_toggle(rdp, true, flags);
        swait_event_exclusive(rdp->nocb_state_wq,
                              rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
                              rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
        /*
         * All kthreads are ready to work, we can finally relieve rcu_core() and
         * enable nocb bypass.
         */
        rcu_nocb_lock_irqsave(rdp, flags);
        rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
        rcu_nocb_unlock_irqrestore(rdp, flags);

        return ret;
}

int rcu_nocb_cpu_offload(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int ret = 0;

        mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
        if (!rcu_rdp_is_offloaded(rdp)) {
                if (cpu_online(cpu)) {
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
                        if (!ret)
                                cpumask_set_cpu(cpu, rcu_nocb_mask);
                } else {
                        pr_info("NOCB: Can't CB-offload an offline CPU\n");
                        ret = -EINVAL;
                }
        }
        cpus_read_unlock();
        mutex_unlock(&rcu_state.barrier_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);

void __init rcu_init_nohz(void)
{
        int cpu;
        bool need_rcu_nocb_mask = false;
        struct rcu_data *rdp;

#if defined(CONFIG_NO_HZ_FULL)
        if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
                need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

        if (need_rcu_nocb_mask) {
                if (!cpumask_available(rcu_nocb_mask)) {
                        if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
                                pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
                                return;
                        }
                }
                rcu_nocb_is_setup = true;
        }

        if (!rcu_nocb_is_setup)
                return;

#if defined(CONFIG_NO_HZ_FULL)
        if (tick_nohz_full_running)
                cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

        if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
                pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
                cpumask_and(rcu_nocb_mask, cpu_possible_mask,
                            rcu_nocb_mask);
        }
        if (cpumask_empty(rcu_nocb_mask))
                pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
        else
                pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
                        cpumask_pr_args(rcu_nocb_mask));
        if (rcu_nocb_poll)
                pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

        for_each_cpu(cpu, rcu_nocb_mask) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rcu_segcblist_empty(&rdp->cblist))
                        rcu_segcblist_init(&rdp->cblist);
                rcu_segcblist_offload(&rdp->cblist, true);
                rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
                rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
        }
        rcu_organize_nocb_kthreads();
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
        init_swait_queue_head(&rdp->nocb_cb_wq);
        init_swait_queue_head(&rdp->nocb_gp_wq);
        init_swait_queue_head(&rdp->nocb_state_wq);
        raw_spin_lock_init(&rdp->nocb_lock);
        raw_spin_lock_init(&rdp->nocb_bypass_lock);
        raw_spin_lock_init(&rdp->nocb_gp_lock);
        timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
        rcu_cblist_init(&rdp->nocb_bypass);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
 * for this CPU's group has not yet been created, spawn it as well.
 */
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        struct rcu_data *rdp_gp;
        struct task_struct *t;

        if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
                return;

        /* If there already is an rcuo kthread, then nothing to do. */
        if (rdp->nocb_cb_kthread)
                return;

        /* If we didn't spawn the GP kthread first, reorganize! */
        rdp_gp = rdp->nocb_gp_rdp;
        if (!rdp_gp->nocb_gp_kthread) {
                t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
                                "rcuog/%d", rdp_gp->cpu);
                if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
                        return;
                WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
        }

        /* Spawn the kthread for this CPU. */
        t = kthread_run(rcu_nocb_cb_kthread, rdp,
                        "rcuo%c/%d", rcu_state.abbr, cpu);
        if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
                return;
        WRITE_ONCE(rdp->nocb_cb_kthread, t);
        WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
}

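/*
 * Illustrative example: booting with rcu_nocbs=4-7 and a GP stride that
 * groups CPUs 4-7 together, onlining CPU 5 spawns "rcuog/4" (if it is not
 * already running) plus "rcuo%c/5", where %c is rcu_state.abbr ("p" for
 * preemptible RCU, "s" otherwise).
 */
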
/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
        int cpu;

        if (rcu_nocb_is_setup) {
                for_each_online_cpu(cpu)
                        rcu_spawn_cpu_nocb_kthread(cpu);
        }
}

/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);

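/*
 * Worked example: with nr_cpu_ids == 64, the default stride below becomes
 * 64 / int_sqrt(64) = 8, so rcu_organize_nocb_kthreads() forms eight
 * groups, with GP kthread leaders at CPUs 0, 8, 16, ..., 56.
 */
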
/*
 * Initialize GP-CB relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
        int cpu;
        bool firsttime = true;
        bool gotnocbs = false;
        bool gotnocbscbs = true;
        int ls = rcu_nocb_gp_stride;
        int nl = 0; /* Next GP kthread. */
        struct rcu_data *rdp;
        struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */

        if (!cpumask_available(rcu_nocb_mask))
                return;
        if (ls == -1) {
                ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
                rcu_nocb_gp_stride = ls;
        }

        /*
         * Each pass through this loop sets up one rcu_data structure.
         * Should the corresponding CPU come online in the future, then
         * we will spawn the needed set of rcu_nocb_kthread() kthreads.
         */
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rdp->cpu >= nl) {
                        /* New GP kthread, set up for CBs & next GP. */
                        gotnocbs = true;
                        nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
                        rdp_gp = rdp;
                        INIT_LIST_HEAD(&rdp->nocb_head_rdp);
                        if (dump_tree) {
                                if (!firsttime)
                                        pr_cont("%s\n", gotnocbscbs
                                                ? "" : " (self only)");
                                gotnocbscbs = false;
                                firsttime = false;
                                pr_alert("%s: No-CB GP kthread CPU %d:",
                                         __func__, cpu);
                        }
                } else {
                        /* Another CB kthread, link to previous GP kthread. */
                        gotnocbscbs = true;
                        if (dump_tree)
                                pr_cont(" %d", cpu);
                }
                rdp->nocb_gp_rdp = rdp_gp;
                if (cpumask_test_cpu(cpu, rcu_nocb_mask))
                        list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
        }
        if (gotnocbs && dump_tree)
                pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}

/*
 * Bind the current task to the offloaded CPUs.  If there are no offloaded
 * CPUs, leave the task unbound.  Splat if the bind attempt fails.
 */
void rcu_bind_current_to_nocb(void)
{
        if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
                WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);

// The ->on_cpu field is available only in CONFIG_SMP=y, so...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
        return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
        return "";
}
#endif // #else #ifdef CONFIG_SMP

/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
        struct rcu_node *rnp = rdp->mynode;

        pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
                rdp->cpu,
                "kK"[!!rdp->nocb_gp_kthread],
                "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
                "dD"[!!rdp->nocb_defer_wakeup],
                "tT"[timer_pending(&rdp->nocb_timer)],
                "sS"[!!rdp->nocb_gp_sleep],
                ".W"[swait_active(&rdp->nocb_gp_wq)],
                ".W"[swait_active(&rnp->nocb_gp_wq[0])],
                ".W"[swait_active(&rnp->nocb_gp_wq[1])],
                ".B"[!!rdp->nocb_gp_bypass],
                ".G"[!!rdp->nocb_gp_gp],
                (long)rdp->nocb_gp_seq,
                rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
                rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
                rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
                show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}

/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
        char bufw[20];
        char bufr[20];
        struct rcu_data *nocb_next_rdp;
        struct rcu_segcblist *rsclp = &rdp->cblist;
        bool waslocked;
        bool wassleep;

        if (rdp->nocb_gp_rdp == rdp)
                show_rcu_nocb_gp_state(rdp);

        nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
                                              &rdp->nocb_entry_rdp,
                                              typeof(*rdp),
                                              nocb_entry_rdp);

        sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
        sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
        pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
                rdp->cpu, rdp->nocb_gp_rdp->cpu,
                nocb_next_rdp ? nocb_next_rdp->cpu : -1,
                "kK"[!!rdp->nocb_cb_kthread],
                "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
                "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
                "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
                "sS"[!!rdp->nocb_cb_sleep],
                ".W"[swait_active(&rdp->nocb_cb_wq)],
                jiffies - rdp->nocb_bypass_first,
                jiffies - rdp->nocb_nobypass_last,
                rdp->nocb_nobypass_count,
                ".D"[rcu_segcblist_ready_cbs(rsclp)],
                ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
                rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
                ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
                rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
                ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
                ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
                rcu_segcblist_n_cbs(&rdp->cblist),
                rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
                rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
                show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

        /* It is OK for GP kthreads to have GP state. */
        if (rdp->nocb_gp_rdp == rdp)
                return;

        waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
        wassleep = swait_active(&rdp->nocb_gp_wq);
        if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
                return; /* Nothing untoward. */

        pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
                "lL"[waslocked],
                "dD"[!!rdp->nocb_defer_wakeup],
                "sS"[!!rdp->nocb_gp_sleep],
                ".W"[wassleep]);
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
        return 0;
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
        return false;
}

/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
                                       unsigned long flags)
{
        local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
        lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
        return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                  unsigned long j)
{
        return true;
}

static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                bool *was_alldone, unsigned long flags)
{
        return false;
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
                                 unsigned long flags)
{
        WARN_ON_ONCE(1); /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
        return false;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
        return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */