/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 * Copyright SUSE, 2021
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 *	   Frederic Weisbecker <frederic@kernel.org>
 */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return lockdep_is_held(&rdp->nocb_lock);
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		if (in_task())
			return true;
	return false;
}

/*
 * Offload callback processing from the set of CPUs specified at boot time
 * by rcu_nocb_mask.  For the CPUs in this set, kthreads are created that
 * pull the callbacks from the corresponding CPU, wait for a grace period
 * to elapse, and invoke the callbacks.  These kthreads are organized into
 * GP kthreads, which manage incoming callbacks, wait for grace periods,
 * and awaken CB kthreads, and the CB kthreads, which only invoke
 * callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */
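
/*
 * For reference: the GP kthreads created below appear in the process list
 * as rcuog/<CPU> and the per-CPU CB kthreads as rcuop/<CPU> or rcuos/<CPU>,
 * depending on the kernel's RCU flavor (see rcu_spawn_cpu_nocb_kthread()
 * near the end of this file).
 */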


/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (*str == '=') {
		if (cpulist_parse(++str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	}
	rcu_state.nocb_is_setup = true;
	return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 1;
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);
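
/*
 * For example, booting with "rcu_nocbs=1-7 rcu_nocb_poll" offloads callback
 * processing from CPUs 1-7 and makes the resulting rcuog kthreads poll for
 * new callbacks instead of waiting to be awakened by call_rcu().
 */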

/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which only can happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
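
/*
 * The default above works out to roughly 16 call_rcu() invocations per
 * millisecond independently of HZ: for example, HZ=1000 gives a limit of
 * 16 per jiffy and HZ=250 gives 64 per jiffy.
 */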

/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, increment ->nocb_lock_contended to
 * flag the contention.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	atomic_inc(&rdp->nocb_lock_contended);
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	smp_mb__after_atomic(); /* atomic_inc() before lock. */
	raw_spin_lock(&rdp->nocb_bypass_lock);
	smp_mb__before_atomic(); /* atomic_dec() after lock. */
	atomic_dec(&rdp->nocb_lock_contended);
}

/*
 * Spinwait until the specified rcu_data structure's ->nocb_lock is
 * not contended.  Please note that this is extremely special-purpose,
 * relying on the fact that at most two kthreads and one CPU contend for
 * this lock, and also that the two kthreads are guaranteed to have frequent
 * grace-period-duration time intervals between successive acquisitions
 * of the lock.  This allows us to use an extremely simple throttling
 * mechanism, and further to apply it only to the CPU doing floods of
 * call_rcu() invocations.  Don't try this at home!
 */
static void rcu_nocb_wait_contended(struct rcu_data *rdp)
{
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
		cpu_relax();
}

/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}

/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

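/*
 * The two rnp->nocb_gp_wq[] entries below alternate according to the
 * low-order bit of the grace-period sequence number, which keeps waiters
 * for consecutive grace periods on separate wait queues.
 */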
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		wake_up_process(rdp_gp->nocb_gp_kthread);
	}

	return needwake;
}

/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}

/*
 * LAZY_FLUSH_JIFFIES decides the maximum amount of time that
 * can elapse before lazy callbacks are flushed.  Lazy callbacks
 * could be flushed much earlier for a number of other reasons;
 * however, LAZY_FLUSH_JIFFIES ensures that no lazy callbacks are
 * left unsubmitted to RCU after that many jiffies.
 */
#define LAZY_FLUSH_JIFFIES (10 * HZ)
static unsigned long jiffies_till_flush = LAZY_FLUSH_JIFFIES;
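
/*
 * With the default above, a lazy callback is therefore handed to RCU no
 * later than about ten seconds (10 * HZ jiffies) after it was queued.
 */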

#ifdef CONFIG_RCU_LAZY
// To be called only from test code.
void rcu_lazy_set_jiffies_till_flush(unsigned long jif)
{
	jiffies_till_flush = jif;
}
EXPORT_SYMBOL(rcu_lazy_set_jiffies_till_flush);

unsigned long rcu_lazy_get_jiffies_till_flush(void)
{
	return jiffies_till_flush;
}
EXPORT_SYMBOL(rcu_lazy_get_jiffies_till_flush);
#endif

/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * Bypass wakeup overrides previous deferments. In case of
	 * callback storms, no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_LAZY &&
	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + jiffies_till_flush);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Return true if there was something to be flushed and it succeeded, otherwise
 * false.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */

	/*
	 * If the new CB requested was a lazy one, queue it onto the main
	 * ->cblist so that we can take advantage of the grace-period that will
	 * happen regardless. But queue it onto the bypass list first so that
	 * the lazy CB is ordered with the existing CBs in the bypass list.
	 */
	if (lazy && rhp) {
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;
	}
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
}

/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}

/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy)
{
	unsigned long c;
	unsigned long cur_gp_seq;
	unsigned long j = jiffies;
	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));

	lockdep_assert_irqs_disabled();

	// Pure softirq/rcuc based processing: no bypassing, no
	// locking.
	if (!rcu_rdp_is_offloaded(rdp)) {
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// In the process of (de-)offloading: no bypassing, but
	// locking.
	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false; /* Not offloaded, no bypassing. */
	}

	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// If we have advanced to a new jiffy, reset counts to allow
	// moving back from ->nocb_bypass to ->cblist.
	if (j == rdp->nocb_nobypass_last) {
		c = rdp->nocb_nobypass_count + 1;
	} else {
		WRITE_ONCE(rdp->nocb_nobypass_last, j);
		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
				 nocb_nobypass_lim_per_jiffy))
			c = 0;
		else if (c > nocb_nobypass_lim_per_jiffy)
			c = nocb_nobypass_lim_per_jiffy;
	}
	WRITE_ONCE(rdp->nocb_nobypass_count, c);

	// If there hasn't yet been all that many ->cblist enqueues
	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
	// ->nocb_bypass first.
	// Lazy CBs throttle this back and do immediate bypass queuing.
	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		if (*was_alldone)
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstQ"));

		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		return false; // Caller must enqueue the callback.
	}

	// If ->nocb_bypass has been used too long or is too full,
	// flush ->nocb_bypass to ->cblist.
	if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    (ncbs && bypass_is_lazy &&
	     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush))) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);

		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}

		// The flush succeeded and we moved CBs into the regular list.
		// Don't wait for the wake up timer as it may be too far ahead.
		// Wake up the GP thread now instead, if the cblist was empty.
		__call_rcu_nocb_wake(rdp, *was_alldone, flags);

		return true; // Callback already enqueued.
	}

	// We need to use the bypass.
	rcu_nocb_wait_contended(rdp);
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);

	if (lazy)
		WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);

	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}
	rcu_nocb_bypass_unlock(rdp);
	smp_mb(); /* Order enqueue before wake. */
	// A wake up of the grace period kthread or timer adjustment
	// needs to be done only if:
	// 1. Bypass list was fully empty before (this is the first
	//    bypass list entry), or:
	// 2. Both of these conditions are met:
	//    a. The bypass list previously had only lazy CBs, and:
	//    b. The new CB is non-lazy.
	if (ncbs && (!bypass_is_lazy || lazy)) {
		local_irq_restore(flags);
	} else {
		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock_irqrestore(rdp, flags);
		}
	}
	return true; // Callback already enqueued.
}

/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
				 __releases(rdp->nocb_lock)
{
	long bypass_len;
	unsigned long cur_gp_seq;
	unsigned long j;
	long lazy_len;
	long len;
	struct task_struct *t;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	lazy_len = READ_ONCE(rdp->lazy_len);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		// Only lazy CBs in bypass list
		if (lazy_len && bypass_len == lazy_len) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazy"));
		} else if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
					   TPS("WakeEmptyIsDeferred"));
		}
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = len;
		j = jiffies;
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		smp_mb(); /* Enqueue before timer_pending(). */
		if ((rdp->nocb_cb_sleep ||
		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
		    !timer_pending(&rdp->nocb_timer)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
					   TPS("WakeOvfIsDeferred"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
		}
	} else {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
}

static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
			      bool *wake_state)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
	    !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
		/*
		 * Offloading. Set our flag and notify the offload worker.
		 * We will handle this rdp until it ever gets de-offloaded.
		 */
		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
		if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
			*wake_state = true;
		ret = 1;
	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
		/*
		 * De-offloading. Clear our flag and notify the de-offload worker.
		 * We will ignore this rdp until it ever gets re-offloaded.
		 */
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
			*wake_state = true;
		ret = 0;
	} else {
		WARN_ON_ONCE(1);
		ret = -1;
	}

	rcu_nocb_unlock_irqrestore(rdp, flags);

	return ret;
}

static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
{
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
	swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
					    !READ_ONCE(my_rdp->nocb_gp_sleep));
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
}

/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show up
 * or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
	bool bypass = false;
	int __maybe_unused cpu = my_rdp->cpu;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool gotcbs = false;
	unsigned long j = jiffies;
	bool lazy = false;
	bool needwait_gp = false; // This prevents actual uninitialized use.
	bool needwake;
	bool needwake_gp;
	struct rcu_data *rdp, *rdp_toggling = NULL;
	struct rcu_node *rnp;
	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
	bool wasempty = false;

	/*
	 * Each pass through the following loop checks for CBs and for the
	 * nearest grace period (if any) to wait for next.  The CB kthreads
	 * and the global grace-period kthread are awakened if needed.
	 */
	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
	/*
	 * An rcu_data structure is removed from the list after its
	 * CPU is de-offloaded and added to the list before that CPU is
	 * (re-)offloaded.  If the following loop happens to be referencing
	 * that rcu_data structure during the time that the corresponding
	 * CPU is de-offloaded and then immediately re-offloaded, this
	 * loop's rdp pointer will be carried to the end of the list by
	 * the resulting pair of list operations.  This can cause the loop
	 * to skip over some of the rcu_data structures that were supposed
	 * to have been scanned.  Fortunately a new iteration through the
	 * entire loop is forced after a given CPU's rcu_data structure
	 * is added to the list, so the skipped-over rcu_data structures
	 * won't be ignored for long.
	 */
	list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
		long bypass_ncbs;
		bool flush_bypass = false;
		long lazy_ncbs;

		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
		rcu_nocb_lock_irqsave(rdp, flags);
		lockdep_assert_held(&rdp->nocb_lock);
		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		lazy_ncbs = READ_ONCE(rdp->lazy_len);

		if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush) ||
		     bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
			   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
			    bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue; /* No callbacks here, try next. */
		}

		if (flush_bypass) {
			// Bypass full or old, so flush it.
			(void)rcu_nocb_try_flush_bypass(rdp, j);
			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
			lazy_ncbs = READ_ONCE(rdp->lazy_len);
		}

		if (bypass_ncbs) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
			if (bypass_ncbs == lazy_ncbs)
				lazy = true;
			else
				bypass = true;
		}
		rnp = rdp->mynode;

		// Advance callbacks if helpful and low contention.
		needwake_gp = false;
		if (!rcu_segcblist_restempty(&rdp->cblist,
					     RCU_NEXT_READY_TAIL) ||
		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake_gp = rcu_advance_cbs(rnp, rdp);
			wasempty = rcu_segcblist_restempty(&rdp->cblist,
							   RCU_NEXT_READY_TAIL);
			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
		}
		// Need to wait on some grace period?
		WARN_ON_ONCE(wasempty &&
			     !rcu_segcblist_restempty(&rdp->cblist,
						      RCU_NEXT_READY_TAIL));
		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
			if (!needwait_gp ||
			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
				wait_gp_seq = cur_gp_seq;
			needwait_gp = true;
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("NeedWaitGP"));
		}
		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
			needwake = rdp->nocb_cb_sleep;
			WRITE_ONCE(rdp->nocb_cb_sleep, false);
			smp_mb(); /* CB invocation -after- GP end. */
		} else {
			needwake = false;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		if (needwake) {
			swake_up_one(&rdp->nocb_cb_wq);
			gotcbs = true;
		}
		if (needwake_gp)
			rcu_gp_kthread_wake();
	}

	my_rdp->nocb_gp_bypass = bypass;
	my_rdp->nocb_gp_gp = needwait_gp;
	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

	// At least one child with non-empty ->nocb_bypass, so set
	// timer in order to avoid stranding its callbacks.
	if (!rcu_nocb_poll) {
		// If the bypass list has only lazy CBs, add a deferred lazy wake up.
		if (lazy && !bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazyIsDeferred"));
		// Otherwise add a deferred bypass wake up.
		} else if (bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
					   TPS("WakeBypassIsDeferred"));
		}
	}

	if (rcu_nocb_poll) {
		/* Polling, so trace if first poll in the series. */
		if (gotcbs)
			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
		if (list_empty(&my_rdp->nocb_head_rdp)) {
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			if (!my_rdp->nocb_toggling_rdp)
				WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
			/* Wait for any offloading rdp */
			nocb_gp_sleep(my_rdp, cpu);
		} else {
			schedule_timeout_idle(1);
		}
	} else if (!needwait_gp) {
		/* Wait for callbacks to appear. */
		nocb_gp_sleep(my_rdp, cpu);
	} else {
		rnp = my_rdp->mynode;
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
			!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
	}

	if (!rcu_nocb_poll) {
		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
		// (De-)queue an rdp to/from the group if its nocb state is changing
		rdp_toggling = my_rdp->nocb_toggling_rdp;
		if (rdp_toggling)
			my_rdp->nocb_toggling_rdp = NULL;

		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
			del_timer(&my_rdp->nocb_timer);
		}
		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
	} else {
		rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
		if (rdp_toggling) {
			/*
			 * Paranoid locking to make sure nocb_toggling_rdp is well
			 * reset *before* we (re)set SEGCBLIST_KTHREAD_GP or we could
			 * race with another round of nocb toggling for this rdp.
			 * Nocb locking should already prevent that, but we stick
			 * to paranoia, especially in this rare path.
			 */
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			my_rdp->nocb_toggling_rdp = NULL;
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
		}
	}

	if (rdp_toggling) {
		bool wake_state = false;
		int ret;

		ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
		if (ret == 1)
			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
		else if (ret == 0)
			list_del(&rdp_toggling->nocb_entry_rdp);
		if (wake_state)
			swake_up_one(&rdp_toggling->nocb_state_wq);
	}

	my_rdp->nocb_gp_seq = -1;
	WARN_ON(signal_pending(current));
}

/*
 * No-CBs grace-period-wait kthread.  There is one of these per group
 * of CPUs, but only once at least one CPU in that group has come online
 * at least once since boot.  This kthread checks for newly posted
 * callbacks from any of the CPUs it is responsible for, waits for a
 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
 * that then have callback-invocation work to do.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	for (;;) {
		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
		nocb_gp_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

static inline bool nocb_cb_can_run(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
	return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
}

/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool needwake_state = false;
	bool needwake_gp = false;
	bool can_sleep = true;
	struct rcu_node *rnp = rdp->mynode;

	do {
		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
						    nocb_cb_wait_cond(rdp));

		// VVV Ensure CB invocation follows _sleep test.
		if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
			WARN_ON(signal_pending(current));
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
		}
	} while (!nocb_cb_can_run(rdp));


	local_irq_save(flags);
	rcu_momentary_dyntick_idle();
	local_irq_restore(flags);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could then cause
	 * both instances of this callback to execute concurrently.
	 */
	local_bh_disable();
	rcu_do_batch(rdp);
	local_bh_enable();
	lockdep_assert_irqs_enabled();
	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
				needwake_state = true;
		}
		if (rcu_segcblist_ready_cbs(cblist))
			can_sleep = false;
	} else {
		/*
		 * De-offloading. Clear our flag and notify the de-offload worker.
		 * We won't touch the callbacks and keep sleeping until we ever
		 * get re-offloaded.
		 */
		WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
			needwake_state = true;
	}

	WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);

	if (rdp->nocb_cb_sleep)
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));

	rcu_nocb_unlock_irqrestore(rdp, flags);
	if (needwake_gp)
		rcu_gp_kthread_wake();

	if (needwake_state)
		swake_up_one(&rdp->nocb_state_wq);
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	// Each pass through this loop does one callback batch, and,
	// if there are no more ready callbacks, waits for them.
	for (;;) {
		nocb_cb_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
					   struct rcu_data *rdp, int level,
					   unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	int ndw;
	int ret;

	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		return false;
	}

	ndw = rdp_gp->nocb_defer_wakeup;
	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

	return ret;
}

/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	unsigned long flags;
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}

/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);

static int rdp_offload_toggle(struct rcu_data *rdp,
			      bool offload, unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;

	rcu_segcblist_offload(cblist, offload);

	if (rdp->nocb_cb_sleep)
		rdp->nocb_cb_sleep = false;
	rcu_nocb_unlock_irqrestore(rdp, flags);

	/*
	 * Ignore former value of nocb_cb_sleep and force wake up as it could
	 * have been spuriously set to false already.
	 */
	swake_up_one(&rdp->nocb_cb_wq);

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	// Queue this rdp for add/del to/from the list to iterate on rcuog
	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	return wake_gp;
}

static long rcu_nocb_rdp_deoffload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	/*
	 * rcu_nocb_rdp_deoffload() may be called directly if
	 * rcuog/o[p] spawn failed, because at this time the rdp->cpu
	 * is not online yet.
	 */
	WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu));

	pr_info("De-offloading %d\n", rdp->cpu);

	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Flush once and for all now.  This suffices because we are
	 * running on the target CPU holding ->nocb_lock (thus having
	 * interrupts disabled), and because rdp_offload_toggle()
	 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
	 * Thus future calls to rcu_segcblist_completely_offloaded() will
	 * return false, which means that future calls to rcu_nocb_try_bypass()
	 * will refuse to put anything into the bypass.
	 */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
	/*
	 * Start with invoking rcu_core() early.  This way if the current thread
	 * happens to preempt an ongoing call to rcu_core() in the middle,
	 * leaving some work dismissed because rcu_core() still thinks the rdp is
	 * completely offloaded, we are guaranteed a nearby future instance of
	 * rcu_core() to catch up.
	 */
	rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
	invoke_rcu_core();
	wake_gp = rdp_offload_toggle(rdp, false, flags);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
	if (rdp_gp->nocb_gp_kthread) {
		if (wake_gp)
			wake_up_process(rdp_gp->nocb_gp_kthread);

		/*
		 * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
		 * Just wait for SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
		 */
		if (!rdp->nocb_cb_kthread) {
			rcu_nocb_lock_irqsave(rdp, flags);
			rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
			rcu_nocb_unlock_irqrestore(rdp, flags);
		}

		swait_event_exclusive(rdp->nocb_state_wq,
				      !rcu_segcblist_test_flags(cblist,
					      SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
	} else {
		/*
		 * No kthread to clear the flags for us or to remove the rdp from
		 * the nocb list to iterate.  Do it here instead.  Locking doesn't
		 * look strictly necessary, but we stick to paranoia in this rare
		 * path.
		 */
		rcu_nocb_lock_irqsave(rdp, flags);
		rcu_segcblist_clear_flags(&rdp->cblist,
					  SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
		rcu_nocb_unlock_irqrestore(rdp, flags);

		list_del(&rdp->nocb_entry_rdp);
	}
	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);

	/*
	 * Lock one last time to acquire latest callback updates from kthreads
	 * so we can later handle callbacks locally without locking.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
	 * lock is released but how about being paranoid for once?
	 */
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
	/*
	 * Without SEGCBLIST_LOCKING, we can't use
	 * rcu_nocb_unlock_irqrestore() anymore.
	 */
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	/* Sanity check */
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));


	return 0;
}

int rcu_nocb_cpu_deoffload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);

static long rcu_nocb_rdp_offload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
	/*
	 * For now we only support re-offload, i.e., the rdp must have been
	 * offloaded on boot first.
	 */
	if (!rdp->nocb_gp_rdp)
		return -EINVAL;

	if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
		return -EINVAL;

	pr_info("Offloading %d\n", rdp->cpu);

	/*
	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
	 * is set.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

	/*
	 * We didn't take the nocb lock while working on the
	 * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
	 * Every modification previously done on rdp->cblist must be visible
	 * remotely to the nocb kthreads upon wake up after reading the cblist
	 * flags.
	 *
	 * The layout against nocb_lock enforces that ordering:
	 *
	 *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
	 * -------------------------   ----------------------------
	 *      WRITE callbacks           rcu_nocb_lock()
	 *      rcu_nocb_lock()           READ flags
	 *      WRITE flags               READ callbacks
	 *      rcu_nocb_unlock()         rcu_nocb_unlock()
	 */
	wake_gp = rdp_offload_toggle(rdp, true, flags);
	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);
	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

	/*
	 * All kthreads are ready to work, we can finally relieve rcu_core() and
	 * enable nocb bypass.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
	rcu_nocb_unlock_irqrestore(rdp, flags);

	return 0;
}

int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.barrier_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);

#ifdef CONFIG_RCU_LAZY
static unsigned long
lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;

	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
	if (!mutex_trylock(&rcu_state.barrier_mutex))
		return 0;

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		count += READ_ONCE(rdp->lazy_len);
	}

	mutex_unlock(&rcu_state.barrier_mutex);

	return count ? count : SHRINK_EMPTY;
}

static unsigned long
lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long flags;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;
	/*
	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
	 * may be ignored or imbalanced.
	 */
	if (!mutex_trylock(&rcu_state.barrier_mutex)) {
		/*
		 * But really don't insist if barrier_mutex is contended since we
		 * can't guarantee that it will never engage in a dependency
		 * chain involving memory allocation. The lock is seldom contended
		 * anyway.
		 */
		return 0;
	}

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int _count;

		if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
			continue;

		if (!READ_ONCE(rdp->lazy_len))
			continue;

		rcu_nocb_lock_irqsave(rdp, flags);
		/*
		 * Recheck under the nocb lock. Since we are not holding the bypass
		 * lock we may still race with increments from the enqueuer but still
		 * we know for sure if there is at least one lazy callback.
		 */
		_count = READ_ONCE(rdp->lazy_len);
		if (!_count) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue;
		}
		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
		rcu_nocb_unlock_irqrestore(rdp, flags);
		wake_nocb_gp(rdp, false);
		sc->nr_to_scan -= _count;
		count += _count;
		if (sc->nr_to_scan <= 0)
			break;
	}

	mutex_unlock(&rcu_state.barrier_mutex);

	return count ? count : SHRINK_STOP;
}

static struct shrinker lazy_rcu_shrinker = {
	.count_objects = lazy_rcu_shrink_count,
	.scan_objects = lazy_rcu_shrink_scan,
	.batch = 0,
	.seeks = DEFAULT_SEEKS,
};
#endif // #ifdef CONFIG_RCU_LAZY

void __init rcu_init_nohz(void)
{
	int cpu;
	struct rcu_data *rdp;
	const struct cpumask *cpumask = NULL;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
		cpumask = tick_nohz_full_mask;
#endif

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) &&
	    !rcu_state.nocb_is_setup && !cpumask)
		cpumask = cpu_possible_mask;

	if (cpumask) {
		if (!cpumask_available(rcu_nocb_mask)) {
			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
				return;
			}
		}

		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
		rcu_state.nocb_is_setup = true;
	}

	if (!rcu_state.nocb_is_setup)
		return;

#ifdef CONFIG_RCU_LAZY
	if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy"))
		pr_err("Failed to register lazy_rcu shrinker!\n");
#endif // #ifdef CONFIG_RCU_LAZY

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	if (cpumask_empty(rcu_nocb_mask))
		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
	else
		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
			cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_offload(&rdp->cblist, true);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
	}
	rcu_organize_nocb_kthreads();
}
1467/* Initialize per-rcu_data variables for no-CBs CPUs. */
1468static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1469{
1470 init_swait_queue_head(&rdp->nocb_cb_wq);
1471 init_swait_queue_head(&rdp->nocb_gp_wq);
1472 init_swait_queue_head(&rdp->nocb_state_wq);
1473 raw_spin_lock_init(&rdp->nocb_lock);
1474 raw_spin_lock_init(&rdp->nocb_bypass_lock);
1475 raw_spin_lock_init(&rdp->nocb_gp_lock);
1476 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
1477 rcu_cblist_init(&rdp->nocb_bypass);
3cb278e7 1478 WRITE_ONCE(rdp->lazy_len, 0);
02e30241 1479 mutex_init(&rdp->nocb_gp_kthread_mutex);
dfcb2754
FW
1480}
1481
1482/*
1483 * If the specified CPU is a no-CBs CPU that does not already have its
1484 * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread
1485 * for this CPU's group has not yet been created, spawn it as well.
1486 */
10d47031 1487static void rcu_spawn_cpu_nocb_kthread(int cpu)
dfcb2754
FW
1488{
1489 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1490 struct rcu_data *rdp_gp;
1491 struct task_struct *t;
54577e23 1492 struct sched_param sp;
dfcb2754 1493
8d2aaa9b 1494 if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
10d47031
FW
1495 return;
1496
2cf4528d
FW
1497 /* If there already is an rcuo kthread, then nothing to do. */
1498 if (rdp->nocb_cb_kthread)
dfcb2754
FW
1499 return;
1500
1501 /* If we didn't spawn the GP kthread first, reorganize! */
54577e23 1502 sp.sched_priority = kthread_prio;
dfcb2754 1503 rdp_gp = rdp->nocb_gp_rdp;
02e30241 1504 mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
dfcb2754
FW
1505 if (!rdp_gp->nocb_gp_kthread) {
1506 t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
1507 "rcuog/%d", rdp_gp->cpu);
02e30241
NU
1508 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
1509 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
3a5761dc 1510 goto end;
02e30241 1511 }
dfcb2754 1512 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
54577e23
AC
1513 if (kthread_prio)
1514 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
dfcb2754 1515 }
02e30241 1516 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
dfcb2754
FW
1517
1518 /* Spawn the kthread for this CPU. */
1519 t = kthread_run(rcu_nocb_cb_kthread, rdp,
1520 "rcuo%c/%d", rcu_state.abbr, cpu);
1521 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
3a5761dc 1522 goto end;
c8b16a65 1523
8f489b4d 1524 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
c8b16a65 1525 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
8f489b4d 1526
dfcb2754
FW
1527 WRITE_ONCE(rdp->nocb_cb_kthread, t);
1528 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
3a5761dc
Z
1529 return;
1530end:
1531 mutex_lock(&rcu_state.barrier_mutex);
1532 if (rcu_rdp_is_offloaded(rdp)) {
1533 rcu_nocb_rdp_deoffload(rdp);
1534 cpumask_clear_cpu(cpu, rcu_nocb_mask);
1535 }
1536 mutex_unlock(&rcu_state.barrier_mutex);
dfcb2754
FW
1537}

/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);

/*
 * Initialize GP-CB relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	bool firsttime = true;
	bool gotnocbs = false;
	bool gotnocbscbs = true;
	int ls = rcu_nocb_gp_stride;
	int nl = 0;  /* Next GP kthread. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
		rcu_nocb_gp_stride = ls;
	}
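
	/*
	 * For example, with the default stride on a 64-CPU system,
	 * ls = 64 / int_sqrt(64) = 8, so CPUs 0-7 share the GP kthread
	 * associated with CPU 0, CPUs 8-15 share the one associated
	 * with CPU 8, and so on.
	 */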

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New GP kthread, set up for CBs & next GP. */
			gotnocbs = true;
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp_gp = rdp;
			INIT_LIST_HEAD(&rdp->nocb_head_rdp);
			if (dump_tree) {
				if (!firsttime)
					pr_cont("%s\n", gotnocbscbs
						? "" : " (self only)");
				gotnocbscbs = false;
				firsttime = false;
				pr_alert("%s: No-CB GP kthread CPU %d:",
					 __func__, cpu);
			}
		} else {
			/* Another CB kthread, link to previous GP kthread. */
			gotnocbscbs = true;
			if (dump_tree)
				pr_cont(" %d", cpu);
		}
		rdp->nocb_gp_rdp = rdp_gp;
		if (cpumask_test_cpu(cpu, rcu_nocb_mask))
			list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
	}
	if (gotnocbs && dump_tree)
		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}

/*
 * Bind the current task to the offloaded CPUs.  If there are no offloaded
 * CPUs, leave the task unbound.  Splat if the bind attempt fails.
 */
void rcu_bind_current_to_nocb(void)
{
	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);

// The ->on_cpu field is available only in CONFIG_SMP=y, so...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return "";
}
#endif // #else #ifdef CONFIG_SMP

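/*
 * In the state dumps below, each two-character string indexed by a condition
 * (for example "kK"[!!rdp->nocb_gp_kthread]) prints its first character when
 * the condition is zero and its second character when it is nonzero.
 */
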
/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
		rdp->cpu,
		"kK"[!!rdp->nocb_gp_kthread],
		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
		"dD"[!!rdp->nocb_defer_wakeup],
		"tT"[timer_pending(&rdp->nocb_timer)],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[swait_active(&rdp->nocb_gp_wq)],
		".W"[swait_active(&rnp->nocb_gp_wq[0])],
		".W"[swait_active(&rnp->nocb_gp_wq[1])],
		".B"[!!rdp->nocb_gp_bypass],
		".G"[!!rdp->nocb_gp_gp],
		(long)rdp->nocb_gp_seq,
		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
		rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}

/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
	char bufw[20];
	char bufr[20];
	struct rcu_data *nocb_next_rdp;
	struct rcu_segcblist *rsclp = &rdp->cblist;
	bool waslocked;
	bool wassleep;

	if (rdp->nocb_gp_rdp == rdp)
		show_rcu_nocb_gp_state(rdp);

	nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
					      &rdp->nocb_entry_rdp,
					      typeof(*rdp),
					      nocb_entry_rdp);

	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
		rdp->cpu, rdp->nocb_gp_rdp->cpu,
		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
		"kK"[!!rdp->nocb_cb_kthread],
		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
		"sS"[!!rdp->nocb_cb_sleep],
		".W"[swait_active(&rdp->nocb_cb_wq)],
		jiffies - rdp->nocb_bypass_first,
		jiffies - rdp->nocb_nobypass_last,
		rdp->nocb_nobypass_count,
		".D"[rcu_segcblist_ready_cbs(rsclp)],
		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
		rcu_segcblist_n_cbs(&rdp->cblist),
		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

	/* It is OK for GP kthreads to have GP state. */
	if (rdp->nocb_gp_rdp == rdp)
		return;

	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
	wassleep = swait_active(&rdp->nocb_gp_wq);
	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
		return;  /* Nothing untoward. */

	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
		"lL"[waslocked],
		"dD"[!!rdp->nocb_defer_wakeup],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[wassleep]);
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return 0;
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	return false;
}

/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	return false;
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	return true;
}

static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags, bool lazy)
{
	return false;
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags)
{
	WARN_ON_ONCE(1);  /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return false;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */