rcu: Use funnel locking for synchronize_rcu_expedited()'s polling loop
[linux-2.6-block.git] / kernel / rcu / tree.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>

#include "tree.h"
#include "rcu.h"

MODULE_ALIAS("rcutree");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to map the string address to the
 * matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.fqs_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
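
/*
 * For reference, RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched)
 * above expands (roughly) to:
 *
 *	static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_sched_data);
 *	struct rcu_state rcu_sched_state = {
 *		.level = { &rcu_sched_state.node[0] },
 *		.rda = &rcu_sched_data,
 *		.call = call_rcu_sched,
 *		...
 *		.name = "rcu_sched",
 *		.abbr = 's',
 *	};
 *
 * so each flavor gets its own rcu_state/rcu_data pair, tied together
 * by the ->rda pointer.
 */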

static struct rcu_state *const rcu_state_p;
static struct rcu_data __percpu *const rcu_data_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

/* rcuc/rcub kthread realtime priority */
#ifdef CONFIG_RCU_KTHREAD_PRIO
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
module_param(gp_preinit_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
static const int gp_preinit_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
static const int gp_init_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
module_param(gp_cleanup_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
static const int gp_cleanup_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
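
/*
 * For example, if the delay is 10 jiffies, a delay is taken once per
 * 3 * 10 = 30 grace periods, for an average slowdown of 10/30 = 1/3
 * jiffy per grace period; with a 100-jiffy delay it is once per 300
 * grace periods, still 1/3 jiffy per grace period.  (Illustrative
 * arithmetic only, following the normalization described above.)
 */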

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}
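
/*
 * For example, both counters start at -300 (see RCU_STATE_INITIALIZER
 * above); starting a grace period advances ->gpnum, making the two
 * counters unequal, and completing that grace period advances
 * ->completed to match ->gpnum again.
 */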

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_sched"),
				       __this_cpu_read(rcu_sched_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
	}
}

void rcu_bh_qs(void)
{
	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
	}
}

static DEFINE_PER_CPU(int, rcu_sched_qs_mask);

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period, which we in turn do by incrementing the ->dynticks counter
 * by two.
 */
static void rcu_momentary_dyntick_idle(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;
	int resched_mask;
	struct rcu_state *rsp;

	local_irq_save(flags);

	/*
	 * Yes, we can lose flag-setting operations.  This is OK, because
	 * the flag will be set again after some delay.
	 */
	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
	raw_cpu_write(rcu_sched_qs_mask, 0);

	/* Find the flavor that needs a quiescent state. */
	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (!(resched_mask & rsp->flavor_mask))
			continue;
		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
		if (READ_ONCE(rdp->mynode->completed) !=
		    READ_ONCE(rdp->cond_resched_completed))
			continue;

		/*
		 * Pretend to be momentarily idle for the quiescent state.
		 * This allows the grace-period kthread to record the
		 * quiescent state, with no need for this CPU to do anything
		 * further.
		 */
		rdtp = this_cpu_ptr(&rcu_dynticks);
		smp_mb__before_atomic(); /* Earlier stuff before QS. */
		atomic_add(2, &rdtp->dynticks);  /* QS. */
		smp_mb__after_atomic(); /* Later stuff after QS. */
		break;
	}
	local_irq_restore(flags);
}
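
/*
 * Note the even/odd protocol on ->dynticks: an odd value means that
 * this CPU is non-idle from RCU's viewpoint, an even value that it is
 * idle.  Adding two above changes the counter while leaving the CPU
 * "non-idle", which is exactly what a zero-duration dyntick-idle
 * sojourn looks like to the grace-period kthread.
 */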

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(void)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 */
void rcu_all_qs(void)
{
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_qs_ctr);
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000;	/* If this many pending, ignore blimit. */
static long qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
module_param(jiffies_till_sched_qs, ulong, 0644);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp != NULL) {
		*flags = READ_ONCE(rsp->gp_flags);
		*gpnum = READ_ONCE(rsp->gpnum);
		*completed = READ_ONCE(rsp->completed);
		return;
	}
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	return READ_ONCE(*fp);
}
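
/*
 * ->need_future_gp[] is a two-element array indexed by the bottom bit
 * of the value that ->completed will have when the requested grace
 * period ends; the (completed + 1) & 0x1 index above therefore picks
 * out the requests for the next grace period.
 */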

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	int i;

	if (rcu_gp_in_progress(rsp))
		return 0;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return 1;  /* Yes, a no-CBs CPU needs one. */
	if (!rdp->nxttail[RCU_NEXT_TAIL])
		return 0;  /* No, this is a no-CBs (or offline) CPU. */
	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
		return 1;  /* Yes, this CPU has newly registered callbacks. */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
				 rdp->nxtcompleted[i]))
			return 1;  /* Yes, CBs for future grace period. */
	return 0; /* No grace period needed. */
}
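
/*
 * Each CPU's callbacks live on a single ->nxtlist that is carved into
 * segments by the ->nxttail[] pointers: RCU_DONE_TAIL (callbacks whose
 * grace period has elapsed), RCU_WAIT_TAIL (waiting on the current
 * grace period), RCU_NEXT_READY_TAIL (waiting on the next grace
 * period), and RCU_NEXT_TAIL (not yet associated with any grace
 * period).  The checks above walk these segments.
 */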

/*
 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_enter_common(long long oldval, bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     atomic_read(&rdtp->dynticks) & 0x1);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal idle entry in RCU read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
			   "Illegal idle entry in RCU-bh read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
			   "Illegal idle entry in RCU-sched read-side critical section.");
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
		rdtp->dynticks_nesting = 0;
		rcu_eqs_enter_common(oldval, user);
	} else {
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	}
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	rcu_sysidle_enter(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_RCU_USER_QS
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(1);
}
#endif /* CONFIG_RCU_USER_QS */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_enter_common(oldval, true);
	rcu_sysidle_enter(1);
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	rcu_dynticks_task_exit();
	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic();  /* See above. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	rcu_sysidle_exit(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_RCU_USER_QS
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}
#endif /* CONFIG_RCU_USER_QS */

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_exit_common(oldval, true);
	rcu_sysidle_exit(1);
	local_irq_restore(flags);
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
		smp_mb__before_atomic();  /* Force delay from prior write. */
		atomic_inc(&rdtp->dynticks);
		/* atomic_inc() before later RCU read-side crit sects */
		smp_mb__after_atomic();  /* See above. */
		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
		incby = 1;
	}
	rdtp->dynticks_nmi_nesting += incby;
	barrier();
}
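
/*
 * For example, an NMI arriving while this CPU is RCU-idle leaves
 * ->dynticks_nmi_nesting at 1, a second NMI nested within the first
 * raises it to 3, and so on.  Any odd value therefore marks an NMI
 * sequence that began in RCU-idle, and rcu_nmi_exit() below restores
 * RCU-idleness only when the count drops back to exactly 1.
 */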

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		rdtp->dynticks_nmi_nesting -= 2;
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	rdtp->dynticks_nmi_nesting = 0;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

/**
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  Unlike
 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 * least disabled preemption.
 */
bool notrace __rcu_is_watching(void)
{
	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that the
 * current CPU can safely enter RCU read-side critical sections.  Return
 * false if the CPU is instead in an extended quiescent state, such as
 * the idle loop or adaptive-tickless usermode execution.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable();
	ret = __rcu_is_watching();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
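
/*
 * A typical use (illustrative only, not taken from this file) is in
 * tracing or debug code that might run from the idle loop, where an
 * RCU read-side critical section is legal only while RCU is watching:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		... access RCU-protected data ...
 *		rcu_read_unlock();
 *	}
 */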

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	if ((rdp->dynticks_snap & 0x1) == 0) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		return 1;
	} else {
		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
				 rdp->mynode->gpnum))
			WRITE_ONCE(rdp->gpwrap, true);
		return 0;
	}
}
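
/*
 * The ULONG_MAX / 4 check above catches a CPU that has slept through
 * so many grace periods that its ->gpnum has fallen a quarter of the
 * counter space behind its rcu_node structure's ->gpnum; setting
 * ->gpwrap keeps later comparisons from being fooled by counter wrap.
 */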

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
				    bool *isidle, unsigned long *maxj)
{
	unsigned int curr;
	int *rcrmp;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/*
	 * Check for the CPU being offline, but only if the grace period
	 * is old enough.  We don't need to worry about the CPU changing
	 * state: If we see it offline even once, it has been through a
	 * quiescent state.
	 *
	 * The reason for insisting that the grace period be at least
	 * one jiffy old is that CPUs that are not quite online and that
	 * have just gone offline can still execute RCU read-side critical
	 * sections.
	 */
	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
		return 0;  /* Grace period is not old enough. */
	barrier();
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.  Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
	 *
	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
	 * is set too high, we override with half of the RCU CPU stall
	 * warning delay.
	 */
	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
	if (ULONG_CMP_GE(jiffies,
			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
			WRITE_ONCE(rdp->cond_resched_completed,
				   READ_ONCE(rdp->mynode->completed));
			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
			WRITE_ONCE(*rcrmp,
				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
			/* Time to beat on that CPU again! */
			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
		}
	}

	return 0;
}
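
/*
 * The UINT_CMP_GE(curr, snap + 2) test above works because ->dynticks
 * is incremented once on each idle entry and once on each idle exit:
 * an increase of two or more since the snapshot guarantees at least
 * one complete trip through dyntick-idle, and therefore a quiescent
 * state, even if the counter is odd (non-idle) at both samplings.
 */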

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ)
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x s%d ->state=%#lx\n",
		       rsp->name, j - gpa,
		       rsp->gpnum, rsp->completed,
		       rsp->gp_flags, rsp->gp_state,
		       rsp->gp_kthread ? rsp->gp_kthread->state : 0);
}

/*
 * Dump stacks of all tasks running on stalled CPUs.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu))
					dump_cpu_task(rnp->grplo + cpu);
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
1200 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1201 }
1202}
1203
6ccd2ecd 1204static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
64db4cff
PM
1205{
1206 int cpu;
1207 long delta;
1208 unsigned long flags;
6ccd2ecd
PM
1209 unsigned long gpa;
1210 unsigned long j;
285fe294 1211 int ndetected = 0;
64db4cff 1212 struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c 1213 long totqlen = 0;
64db4cff
PM
1214
1215 /* Only let one CPU complain about others per time interval. */
1216
1304afb2 1217 raw_spin_lock_irqsave(&rnp->lock, flags);
7d0ae808 1218 delta = jiffies - READ_ONCE(rsp->jiffies_stall);
fc2219d4 1219 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1304afb2 1220 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
1221 return;
1222 }
7d0ae808
PM
1223 WRITE_ONCE(rsp->jiffies_stall,
1224 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1304afb2 1225 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff 1226
8cdd32a9
PM
1227 /*
1228 * OK, time to rat on our buddy...
1229 * See Documentation/RCU/stallwarn.txt for info on how to debug
1230 * RCU CPU stall warnings.
1231 */
d7f3e207 1232 pr_err("INFO: %s detected stalls on CPUs/tasks:",
4300aa64 1233 rsp->name);
a858af28 1234 print_cpu_stall_info_begin();
a0b6c9a7 1235 rcu_for_each_leaf_node(rsp, rnp) {
3acd9eb3 1236 raw_spin_lock_irqsave(&rnp->lock, flags);
9bc8b558 1237 ndetected += rcu_print_task_stall(rnp);
c8020a67
PM
1238 if (rnp->qsmask != 0) {
1239 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1240 if (rnp->qsmask & (1UL << cpu)) {
1241 print_cpu_stall_info(rsp,
1242 rnp->grplo + cpu);
1243 ndetected++;
1244 }
1245 }
3acd9eb3 1246 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff 1247 }
a858af28 1248
a858af28 1249 print_cpu_stall_info_end();
53bb857c
PM
1250 for_each_possible_cpu(cpu)
1251 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
83ebe63e 1252 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
eee05882 1253 smp_processor_id(), (long)(jiffies - rsp->gp_start),
83ebe63e 1254 (long)rsp->gpnum, (long)rsp->completed, totqlen);
6ccd2ecd 1255 if (ndetected) {
b637a328 1256 rcu_dump_cpu_stacks(rsp);
6ccd2ecd 1257 } else {
7d0ae808
PM
1258 if (READ_ONCE(rsp->gpnum) != gpnum ||
1259 READ_ONCE(rsp->completed) == gpnum) {
6ccd2ecd
PM
1260 pr_err("INFO: Stall ended before state dump start\n");
1261 } else {
1262 j = jiffies;
7d0ae808 1263 gpa = READ_ONCE(rsp->gp_activity);
237a0f21 1264 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
6ccd2ecd 1265 rsp->name, j - gpa, j, gpa,
237a0f21
PM
1266 jiffies_till_next_fqs,
1267 rcu_get_root(rsp)->qsmask);
6ccd2ecd
PM
1268 /* In this case, the current CPU might be at fault. */
1269 sched_show_task(current);
1270 }
1271 }
c1dc0b9c 1272
4cdfc175 1273 /* Complain about tasks blocking the grace period. */
1ed509a2
PM
1274 rcu_print_detail_task_stall(rsp);
1275
fb81a44b
PM
1276 rcu_check_gp_kthread_starvation(rsp);
1277
4cdfc175 1278 force_quiescent_state(rsp); /* Kick them all. */
64db4cff
PM
1279}
1280
1281static void print_cpu_stall(struct rcu_state *rsp)
1282{
53bb857c 1283 int cpu;
64db4cff
PM
1284 unsigned long flags;
1285 struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c 1286 long totqlen = 0;
64db4cff 1287
8cdd32a9
PM
1288 /*
1289 * OK, time to rat on ourselves...
1290 * See Documentation/RCU/stallwarn.txt for info on how to debug
1291 * RCU CPU stall warnings.
1292 */
d7f3e207 1293 pr_err("INFO: %s self-detected stall on CPU", rsp->name);
a858af28
PM
1294 print_cpu_stall_info_begin();
1295 print_cpu_stall_info(rsp, smp_processor_id());
1296 print_cpu_stall_info_end();
53bb857c
PM
1297 for_each_possible_cpu(cpu)
1298 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
83ebe63e
PM
1299 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1300 jiffies - rsp->gp_start,
1301 (long)rsp->gpnum, (long)rsp->completed, totqlen);
fb81a44b
PM
1302
1303 rcu_check_gp_kthread_starvation(rsp);
1304
bc1dce51 1305 rcu_dump_cpu_stacks(rsp);
c1dc0b9c 1306
1304afb2 1307 raw_spin_lock_irqsave(&rnp->lock, flags);
7d0ae808
PM
1308 if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
1309 WRITE_ONCE(rsp->jiffies_stall,
1310 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1304afb2 1311 raw_spin_unlock_irqrestore(&rnp->lock, flags);
c1dc0b9c 1312
b021fe3e
PZ
1313 /*
1314 * Attempt to revive the RCU machinery by forcing a context switch.
1315 *
1316 * A context switch would normally allow the RCU state machine to make
1317 * progress and it could be we're stuck in kernel space without context
1318 * switches for an entirely unreasonable amount of time.
1319 */
1320 resched_cpu(smp_processor_id());
64db4cff
PM
1321}
1322
1323static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1324{
26cdfedf
PM
1325 unsigned long completed;
1326 unsigned long gpnum;
1327 unsigned long gps;
bad6e139
PM
1328 unsigned long j;
1329 unsigned long js;
64db4cff
PM
1330 struct rcu_node *rnp;
1331
26cdfedf 1332 if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
c68de209 1333 return;
cb1e78cf 1334 j = jiffies;
26cdfedf
PM
1335
1336 /*
1337 * Lots of memory barriers to reject false positives.
1338 *
1339 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1340 * then rsp->gp_start, and finally rsp->completed. These values
1341 * are updated in the opposite order with memory barriers (or
1342 * equivalent) during grace-period initialization and cleanup.
1343 * Now, a false positive can occur if we get an new value of
1344 * rsp->gp_start and a old value of rsp->jiffies_stall. But given
1345 * the memory barriers, the only way that this can happen is if one
1346 * grace period ends and another starts between these two fetches.
1347 * Detect this by comparing rsp->completed with the previous fetch
1348 * from rsp->gpnum.
1349 *
1350 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1351 * and rsp->gp_start suffice to forestall false positives.
1352 */
7d0ae808 1353 gpnum = READ_ONCE(rsp->gpnum);
26cdfedf 1354 smp_rmb(); /* Pick up ->gpnum first... */
7d0ae808 1355 js = READ_ONCE(rsp->jiffies_stall);
26cdfedf 1356 smp_rmb(); /* ...then ->jiffies_stall before the rest... */
7d0ae808 1357 gps = READ_ONCE(rsp->gp_start);
26cdfedf 1358 smp_rmb(); /* ...and finally ->gp_start before ->completed. */
7d0ae808 1359 completed = READ_ONCE(rsp->completed);
26cdfedf
PM
1360 if (ULONG_CMP_GE(completed, gpnum) ||
1361 ULONG_CMP_LT(j, js) ||
1362 ULONG_CMP_GE(gps, js))
1363 return; /* No stall or GP completed since entering function. */
64db4cff 1364 rnp = rdp->mynode;
c96ea7cf 1365 if (rcu_gp_in_progress(rsp) &&
7d0ae808 1366 (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
64db4cff
PM
1367
1368 /* We haven't checked in, so go dump stack. */
1369 print_cpu_stall(rsp);
1370
bad6e139
PM
1371 } else if (rcu_gp_in_progress(rsp) &&
1372 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
64db4cff 1373
bad6e139 1374 /* They had a few time units to dump stack, so complain. */
6ccd2ecd 1375 print_other_cpu_stall(rsp, gpnum);
64db4cff
PM
1376 }
1377}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

/*
 * Initialize the specified rcu_data structure's default callback list
 * to empty.  The default callback list is the one that is not used by
 * no-callbacks CPUs.
 */
static void init_default_callback_list(struct rcu_data *rdp)
{
	int i;

	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
}

/*
 * Initialize the specified rcu_data structure's callback list to empty.
 */
static void init_callback_list(struct rcu_data *rdp)
{
	if (init_nocb_callback_list(rdp))
		return;
	init_default_callback_list(rdp);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}
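
/*
 * For example, with ->completed at 42 and no grace period in progress
 * at the root, callbacks can be tagged with completion value 43.  If a
 * grace period might already be in flight, they must instead wait for
 * 44, because the in-flight grace period cannot be trusted to have
 * seen their registration.
 */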
1448
0446be48
PM
1449/*
1450 * Trace-event helper function for rcu_start_future_gp() and
1451 * rcu_nocb_wait_gp().
1452 */
1453static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
e66c33d5 1454 unsigned long c, const char *s)
0446be48
PM
1455{
1456 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1457 rnp->completed, c, rnp->level,
1458 rnp->grplo, rnp->grphi, s);
1459}
1460
1461/*
1462 * Start some future grace period, as needed to handle newly arrived
1463 * callbacks. The required future grace periods are recorded in each
48a7639c
PM
1464 * rcu_node structure's ->need_future_gp field. Returns true if there
1465 * is reason to awaken the grace-period kthread.
0446be48
PM
1466 *
1467 * The caller must hold the specified rcu_node structure's ->lock.
1468 */
48a7639c
PM
1469static bool __maybe_unused
1470rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1471 unsigned long *c_out)
0446be48
PM
1472{
1473 unsigned long c;
1474 int i;
48a7639c 1475 bool ret = false;
0446be48
PM
1476 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1477
1478 /*
1479 * Pick up grace-period number for new callbacks. If this
1480 * grace period is already marked as needed, return to the caller.
1481 */
1482 c = rcu_cbs_completed(rdp->rsp, rnp);
f7f7bac9 1483 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
0446be48 1484 if (rnp->need_future_gp[c & 0x1]) {
f7f7bac9 1485 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
48a7639c 1486 goto out;
0446be48
PM
1487 }
1488
1489 /*
1490 * If either this rcu_node structure or the root rcu_node structure
1491 * believe that a grace period is in progress, then we must wait
1492 * for the one following, which is in "c". Because our request
1493 * will be noticed at the end of the current grace period, we don't
48bd8e9b
PK
1494 * need to explicitly start one. We only do the lockless check
1495 * of rnp_root's fields if the current rcu_node structure thinks
1496 * there is no grace period in flight, and because we hold rnp->lock,
1497 * the only possible change is when rnp_root's two fields are
1498 * equal, in which case rnp_root->gpnum might be concurrently
1499 * incremented. But that is OK, as it will just result in our
1500 * doing some extra useless work.
0446be48
PM
1501 */
1502 if (rnp->gpnum != rnp->completed ||
7d0ae808 1503 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
0446be48 1504 rnp->need_future_gp[c & 0x1]++;
f7f7bac9 1505 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
48a7639c 1506 goto out;
0446be48
PM
1507 }
1508
1509 /*
1510 * There might be no grace period in progress. If we don't already
1511 * hold it, acquire the root rcu_node structure's lock in order to
1512 * start one (if needed).
1513 */
6303b9c8 1514 if (rnp != rnp_root) {
0446be48 1515 raw_spin_lock(&rnp_root->lock);
6303b9c8
PM
1516 smp_mb__after_unlock_lock();
1517 }
0446be48
PM
1518
1519 /*
1520 * Get a new grace-period number. If there really is no grace
1521 * period in progress, it will be smaller than the one we obtained
1522 * earlier. Adjust callbacks as needed. Note that even no-CBs
1523 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1524 */
1525 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1526 for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1527 if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1528 rdp->nxtcompleted[i] = c;
1529
1530 /*
1531 * If the need for the required grace period is already
1532 * recorded, trace and leave.
1533 */
1534 if (rnp_root->need_future_gp[c & 0x1]) {
f7f7bac9 1535 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
0446be48
PM
1536 goto unlock_out;
1537 }
1538
1539 /* Record the need for the future grace period. */
1540 rnp_root->need_future_gp[c & 0x1]++;
1541
1542 /* If a grace period is not already in progress, start one. */
1543 if (rnp_root->gpnum != rnp_root->completed) {
f7f7bac9 1544 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
0446be48 1545 } else {
f7f7bac9 1546 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
48a7639c 1547 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
0446be48
PM
1548 }
1549unlock_out:
1550 if (rnp != rnp_root)
1551 raw_spin_unlock(&rnp_root->lock);
48a7639c
PM
1552out:
1553 if (c_out != NULL)
1554 *c_out = c;
1555 return ret;
0446be48
PM
1556}
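/*
 * Illustrative sketch, not part of the original source:
 * ->need_future_gp[] as a two-entry ring indexed by the low bit of the
 * requested ->completed value.  Because rcu_cbs_completed() only ever
 * asks for the next grace period or the one after, the two possible
 * requests differ in their bottom bit and never collide:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

int main(void)
{
	int need_future_gp[2] = { 0, 0 };
	unsigned long completed = 41;		/* current ->completed */
	unsigned long c1 = completed + 1;	/* "next GP" request */
	unsigned long c2 = completed + 2;	/* "GP after next" request */

	need_future_gp[c1 & 0x1]++;		/* lands in slot 0 */
	need_future_gp[c2 & 0x1]++;		/* lands in slot 1 */
	printf("%d %d\n", need_future_gp[0], need_future_gp[1]);	/* 1 1 */
	return 0;
}
#endif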
1557
1558/*
1559 * Clean up any old requests for the just-ended grace period. Also return
1560 * whether any additional grace periods have been requested, and invoke
1561 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1562 * waiting for this grace period to complete.
1563 */
1564static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1565{
1566 int c = rnp->completed;
1567 int needmore;
1568 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1569
1570 rcu_nocb_gp_cleanup(rsp, rnp);
1571 rnp->need_future_gp[c & 0x1] = 0;
1572 needmore = rnp->need_future_gp[(c + 1) & 0x1];
f7f7bac9
SRRH
1573 trace_rcu_future_gp(rnp, rdp, c,
1574 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
0446be48
PM
1575 return needmore;
1576}
1577
48a7639c
PM
1578/*
1579 * Awaken the grace-period kthread for the specified flavor of RCU.
1580 * Don't do a self-awaken, and don't bother awakening when there is
1581 * nothing for the grace-period kthread to do (as in several CPUs
1582 * raced to awaken, and we lost), and finally don't try to awaken
1583 * a kthread that has not yet been created.
1584 */
1585static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1586{
1587 if (current == rsp->gp_kthread ||
7d0ae808 1588 !READ_ONCE(rsp->gp_flags) ||
48a7639c
PM
1589 !rsp->gp_kthread)
1590 return;
1591 wake_up(&rsp->gp_wq);
1592}
1593
dc35c893
PM
1594/*
1595 * If there is room, assign a ->completed number to any callbacks on
1596 * this CPU that have not already been assigned. Also accelerate any
1597 * callbacks that were previously assigned a ->completed number that has
1598 * since proven to be too conservative, which can happen if callbacks get
1599 * assigned a ->completed number while RCU is idle, but with reference to
1600 * a non-root rcu_node structure. This function is idempotent, so it does
48a7639c
PM
1601 * not hurt to call it repeatedly. Returns a flag saying that we should
1602 * awaken the RCU grace-period kthread.
dc35c893
PM
1603 *
1604 * The caller must hold rnp->lock with interrupts disabled.
1605 */
48a7639c 1606static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c893
PM
1607 struct rcu_data *rdp)
1608{
1609 unsigned long c;
1610 int i;
48a7639c 1611 bool ret;
dc35c893
PM
1612
1613 /* If the CPU has no callbacks, nothing to do. */
1614 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639c 1615 return false;
dc35c893
PM
1616
1617 /*
1618 * Starting from the sublist containing the callbacks most
1619 * recently assigned a ->completed number and working down, find the
1620 * first sublist that is not assignable to an upcoming grace period.
1621 * Such a sublist has something in it (first two tests) and has
1622 * a ->completed number assigned that will complete sooner than
1623 * the ->completed number for newly arrived callbacks (last test).
1624 *
1625 * The key point is that any later sublist can be assigned the
1626 * same ->completed number as the newly arrived callbacks, which
1627 * means that the callbacks in any of these later sublists can be
1628 * grouped into a single sublist, whether or not they have already
1629 * been assigned a ->completed number.
1630 */
1631 c = rcu_cbs_completed(rsp, rnp);
1632 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1633 if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1634 !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1635 break;
1636
1637 /*
1638 * If there is no sublist for unassigned callbacks, leave.
1639 * At the same time, advance "i" one sublist, so that "i" will
1640 * index into the sublist into which all the remaining callbacks
1641 * should be grouped.
1642 */
1643 if (++i >= RCU_NEXT_TAIL)
48a7639c 1644 return false;
dc35c893
PM
1645
1646 /*
1647 * Assign all subsequent callbacks' ->completed number to the next
1648 * full grace period and group them all in the sublist initially
1649 * indexed by "i".
1650 */
1651 for (; i <= RCU_NEXT_TAIL; i++) {
1652 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1653 rdp->nxtcompleted[i] = c;
1654 }
910ee45d 1655 /* Record any needed additional grace periods. */
48a7639c 1656 ret = rcu_start_future_gp(rnp, rdp, NULL);
6d4b418c
PM
1657
1658 /* Trace depending on how much we were able to accelerate. */
1659 if (!*rdp->nxttail[RCU_WAIT_TAIL])
f7f7bac9 1660 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
6d4b418c 1661 else
f7f7bac9 1662 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
48a7639c 1663 return ret;
dc35c893
PM
1664}
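/*
 * Illustrative sketch, not part of the original source: acceleration as
 * tag arithmetic.  Working down from the newest sublist, find the last
 * one that must keep an earlier grace-period tag, then retag everything
 * after it with the (sooner) number "c".  Plain comparison stands in for
 * ULONG_CMP_GE(), and the values are invented:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

int main(void)
{
	/* Per-sublist GP tags; slot 0 (DONE) carries no tag. */
	unsigned long nxtcompleted[4] = { 0, 42, 44, 44 };
	unsigned long c = 43;	/* from rcu_cbs_completed() */
	int i;

	for (i = 2; i > 0; i--)
		if (nxtcompleted[i] < c)
			break;		/* sublist 1 must keep tag 42 */
	for (i++; i <= 3; i++)
		nxtcompleted[i] = c;	/* sublists 2 and 3: 44 -> 43 */

	for (i = 1; i <= 3; i++)
		printf("sublist %d waits for GP %lu\n", i, nxtcompleted[i]);
	return 0;
}
#endif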
1665
1666/*
1667 * Move any callbacks whose grace period has completed to the
1668 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1669 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1670 * sublist. This function is idempotent, so it does not hurt to
1671 * invoke it repeatedly. As long as it is not invoked -too- often...
48a7639c 1672 * Returns true if the RCU grace-period kthread needs to be awakened.
dc35c893
PM
1673 *
1674 * The caller must hold rnp->lock with interrupts disabled.
1675 */
48a7639c 1676static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c893
PM
1677 struct rcu_data *rdp)
1678{
1679 int i, j;
1680
1681 /* If the CPU has no callbacks, nothing to do. */
1682 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639c 1683 return false;
dc35c893
PM
1684
1685 /*
1686 * Find all callbacks whose ->completed numbers indicate that they
1687 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1688 */
1689 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1690 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1691 break;
1692 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1693 }
1694 /* Clean up any sublist tail pointers that were misordered above. */
1695 for (j = RCU_WAIT_TAIL; j < i; j++)
1696 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1697
1698 /* Copy down callbacks to fill in empty sublists. */
1699 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1700 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1701 break;
1702 rdp->nxttail[j] = rdp->nxttail[i];
1703 rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1704 }
1705
1706 /* Classify any remaining callbacks. */
48a7639c 1707 return rcu_accelerate_cbs(rsp, rnp, rdp);
dc35c893
PM
1708}
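/*
 * Illustrative sketch, not part of the original source: the advance step
 * as tag arithmetic.  Any sublist whose grace-period tag does not exceed
 * the rcu_node's ->completed value is now invocable and conceptually
 * merges into the DONE sublist.  Plain comparison stands in for
 * ULONG_CMP_LT(); values are invented:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

int main(void)
{
	unsigned long completed = 43;			/* rnp->completed */
	unsigned long nxtcompleted[4] = { 0, 42, 43, 44 };
	int i;

	for (i = 1; i < 4; i++) {
		if (completed < nxtcompleted[i])
			break;				/* GP 44 not done yet */
		printf("sublist %d ready to invoke\n", i);
	}
	return 0;
}
#endif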
1709
d09b62df 1710/*
ba9fbe95
PM
1711 * Update CPU-local rcu_data state to record the beginnings and ends of
1712 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1713 * structure corresponding to the current CPU, and must have irqs disabled.
48a7639c 1714 * Returns true if the grace-period kthread needs to be awakened.
d09b62df 1715 */
48a7639c
PM
1716static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1717 struct rcu_data *rdp)
d09b62df 1718{
48a7639c
PM
1719 bool ret;
1720
ba9fbe95 1721 /* Handle the ends of any preceding grace periods first. */
e3663b10 1722 if (rdp->completed == rnp->completed &&
7d0ae808 1723 !unlikely(READ_ONCE(rdp->gpwrap))) {
d09b62df 1724
ba9fbe95 1725 /* No grace period end, so just accelerate recent callbacks. */
48a7639c 1726 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
d09b62df 1727
dc35c893
PM
1728 } else {
1729
1730 /* Advance callbacks. */
48a7639c 1731 ret = rcu_advance_cbs(rsp, rnp, rdp);
d09b62df
PM
1732
1733 /* Remember that we saw this grace-period completion. */
1734 rdp->completed = rnp->completed;
f7f7bac9 1735 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
d09b62df 1736 }
398ebe60 1737
7d0ae808 1738 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
6eaef633
PM
1739 /*
1740 * If the current grace period is waiting for this CPU,
1741 * set up to detect a quiescent state, otherwise don't
1742 * go looking for one.
1743 */
1744 rdp->gpnum = rnp->gpnum;
f7f7bac9 1745 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
6eaef633 1746 rdp->passed_quiesce = 0;
5cd37193 1747 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
6eaef633
PM
1748 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1749 zero_cpu_stall_ticks(rdp);
7d0ae808 1750 WRITE_ONCE(rdp->gpwrap, false);
6eaef633 1751 }
48a7639c 1752 return ret;
6eaef633
PM
1753}
1754
d34ea322 1755static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
6eaef633
PM
1756{
1757 unsigned long flags;
48a7639c 1758 bool needwake;
6eaef633
PM
1759 struct rcu_node *rnp;
1760
1761 local_irq_save(flags);
1762 rnp = rdp->mynode;
7d0ae808
PM
1763 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1764 rdp->completed == READ_ONCE(rnp->completed) &&
1765 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
6eaef633
PM
1766 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1767 local_irq_restore(flags);
1768 return;
1769 }
6303b9c8 1770 smp_mb__after_unlock_lock();
48a7639c 1771 needwake = __note_gp_changes(rsp, rnp, rdp);
6eaef633 1772 raw_spin_unlock_irqrestore(&rnp->lock, flags);
48a7639c
PM
1773 if (needwake)
1774 rcu_gp_kthread_wake(rsp);
6eaef633
PM
1775}
1776
0f41c0dd
PM
1777static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1778{
1779 if (delay > 0 &&
1780 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1781 schedule_timeout_uninterruptible(delay);
1782}
1783
b3dbec76 1784/*
f7be8209 1785 * Initialize a new grace period. Return 0 if no grace period required.
b3dbec76 1786 */
7fdefc10 1787static int rcu_gp_init(struct rcu_state *rsp)
b3dbec76 1788{
0aa04b05 1789 unsigned long oldmask;
b3dbec76 1790 struct rcu_data *rdp;
7fdefc10 1791 struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76 1792
7d0ae808 1793 WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10 1794 raw_spin_lock_irq(&rnp->lock);
6303b9c8 1795 smp_mb__after_unlock_lock();
7d0ae808 1796 if (!READ_ONCE(rsp->gp_flags)) {
f7be8209
PM
1797 /* Spurious wakeup, tell caller to go back to sleep. */
1798 raw_spin_unlock_irq(&rnp->lock);
1799 return 0;
1800 }
7d0ae808 1801 WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
b3dbec76 1802
f7be8209
PM
1803 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1804 /*
1805 * Grace period already in progress, don't start another.
1806 * Not supposed to be able to happen.
1807 */
7fdefc10
PM
1808 raw_spin_unlock_irq(&rnp->lock);
1809 return 0;
1810 }
1811
7fdefc10 1812 /* Advance to a new grace period and initialize state. */
26cdfedf 1813 record_gp_stall_check_time(rsp);
765a3f4f
PM
1814 /* Record GP times before starting GP, hence smp_store_release(). */
1815 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
f7f7bac9 1816 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
7fdefc10
PM
1817 raw_spin_unlock_irq(&rnp->lock);
1818
0aa04b05
PM
1819 /*
1820 * Apply per-leaf buffered online and offline operations to the
1821 * rcu_node tree. Note that this new grace period need not wait
1822 * for subsequent online CPUs, and that quiescent-state forcing
1823 * will handle subsequent offline CPUs.
1824 */
1825 rcu_for_each_leaf_node(rsp, rnp) {
0f41c0dd 1826 rcu_gp_slow(rsp, gp_preinit_delay);
0aa04b05
PM
1827 raw_spin_lock_irq(&rnp->lock);
1828 smp_mb__after_unlock_lock();
1829 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1830 !rnp->wait_blkd_tasks) {
1831 /* Nothing to do on this leaf rcu_node structure. */
1832 raw_spin_unlock_irq(&rnp->lock);
1833 continue;
1834 }
1835
1836 /* Record old state, apply changes to ->qsmaskinit field. */
1837 oldmask = rnp->qsmaskinit;
1838 rnp->qsmaskinit = rnp->qsmaskinitnext;
1839
1840 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1841 if (!oldmask != !rnp->qsmaskinit) {
1842 if (!oldmask) /* First online CPU for this rcu_node. */
1843 rcu_init_new_rnp(rnp);
1844 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
1845 rnp->wait_blkd_tasks = true;
1846 else /* Last offline CPU and can propagate. */
1847 rcu_cleanup_dead_rnp(rnp);
1848 }
1849
1850 /*
1851 * If all waited-on tasks from prior grace period are
1852 * done, and if all this rcu_node structure's CPUs are
1853 * still offline, propagate up the rcu_node tree and
1854 * clear ->wait_blkd_tasks. Otherwise, if one of this
1855 * rcu_node structure's CPUs has since come back online,
1856 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
1857 * checks for this, so just call it unconditionally).
1858 */
1859 if (rnp->wait_blkd_tasks &&
1860 (!rcu_preempt_has_tasks(rnp) ||
1861 rnp->qsmaskinit)) {
1862 rnp->wait_blkd_tasks = false;
1863 rcu_cleanup_dead_rnp(rnp);
1864 }
1865
1866 raw_spin_unlock_irq(&rnp->lock);
1867 }
7fdefc10
PM
1868
1869 /*
1870 * Set the quiescent-state-needed bits in all the rcu_node
1871 * structures for all currently online CPUs in breadth-first order,
1872 * starting from the root rcu_node structure, relying on the layout
1873 * of the tree within the rsp->node[] array. Note that other CPUs
1874 * will access only the leaves of the hierarchy, thus seeing that no
1875 * grace period is in progress, at least until the corresponding
1876 * leaf node has been initialized. In addition, we have excluded
1877 * CPU-hotplug operations.
1878 *
1879 * The grace period cannot complete until the initialization
1880 * process finishes, because this kthread handles both.
1881 */
1882 rcu_for_each_node_breadth_first(rsp, rnp) {
0f41c0dd 1883 rcu_gp_slow(rsp, gp_init_delay);
b3dbec76 1884 raw_spin_lock_irq(&rnp->lock);
6303b9c8 1885 smp_mb__after_unlock_lock();
b3dbec76 1886 rdp = this_cpu_ptr(rsp->rda);
7fdefc10
PM
1887 rcu_preempt_check_blocked_tasks(rnp);
1888 rnp->qsmask = rnp->qsmaskinit;
7d0ae808 1889 WRITE_ONCE(rnp->gpnum, rsp->gpnum);
3f47da0f 1890 if (WARN_ON_ONCE(rnp->completed != rsp->completed))
7d0ae808 1891 WRITE_ONCE(rnp->completed, rsp->completed);
7fdefc10 1892 if (rnp == rdp->mynode)
48a7639c 1893 (void)__note_gp_changes(rsp, rnp, rdp);
7fdefc10
PM
1894 rcu_preempt_boost_start_gp(rnp);
1895 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1896 rnp->level, rnp->grplo,
1897 rnp->grphi, rnp->qsmask);
1898 raw_spin_unlock_irq(&rnp->lock);
bde6c3aa 1899 cond_resched_rcu_qs();
7d0ae808 1900 WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10 1901 }
b3dbec76 1902
7fdefc10
PM
1903 return 1;
1904}
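/*
 * Illustrative sketch, not part of the original source: why
 * rcu_for_each_node_breadth_first() can rely on array order.  The
 * rcu_node tree is stored level by level in rsp->node[], so a linear
 * scan from index 0 reaches every parent before any of its children,
 * which is exactly the order initialization needs.  Hypothetical
 * three-node tree:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <assert.h>
#include <stdio.h>

struct toy_node { int parent; };	/* parent's index; -1 for the root */

int main(void)
{
	struct toy_node node[3] = { { -1 }, { 0 }, { 0 } };
	int i;

	for (i = 0; i < 3; i++)
		assert(node[i].parent < i);	/* parent visited first */
	printf("breadth-first order holds\n");
	return 0;
}
#endif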
b3dbec76 1905
4cdfc175
PM
1906/*
1907 * Do one round of quiescent-state forcing.
1908 */
01896f7e 1909static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
4cdfc175
PM
1910{
1911 int fqs_state = fqs_state_in;
217af2a2
PM
1912 bool isidle = false;
1913 unsigned long maxj;
4cdfc175
PM
1914 struct rcu_node *rnp = rcu_get_root(rsp);
1915
7d0ae808 1916 WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175
PM
1917 rsp->n_force_qs++;
1918 if (fqs_state == RCU_SAVE_DYNTICK) {
1919 /* Collect dyntick-idle snapshots. */
0edd1b17 1920 if (is_sysidle_rcu_state(rsp)) {
e02b2edf 1921 isidle = true;
0edd1b17
PM
1922 maxj = jiffies - ULONG_MAX / 4;
1923 }
217af2a2
PM
1924 force_qs_rnp(rsp, dyntick_save_progress_counter,
1925 &isidle, &maxj);
0edd1b17 1926 rcu_sysidle_report_gp(rsp, isidle, maxj);
4cdfc175
PM
1927 fqs_state = RCU_FORCE_QS;
1928 } else {
1929 /* Handle dyntick-idle and offline CPUs. */
675da67f 1930 isidle = true;
217af2a2 1931 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
4cdfc175
PM
1932 }
1933 /* Clear flag to prevent immediate re-entry. */
7d0ae808 1934 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
4cdfc175 1935 raw_spin_lock_irq(&rnp->lock);
6303b9c8 1936 smp_mb__after_unlock_lock();
7d0ae808
PM
1937 WRITE_ONCE(rsp->gp_flags,
1938 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
4cdfc175
PM
1939 raw_spin_unlock_irq(&rnp->lock);
1940 }
1941 return fqs_state;
1942}
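/*
 * Illustrative sketch, not part of the original source: the two forcing
 * phases in miniature.  The first pass snapshots each CPU's dynticks
 * counter; a later pass infers a quiescent state if the counter is even
 * (CPU idle at snapshot time) or has since changed.  This is a
 * simplified stand-in for the real dyntick-idle bookkeeping:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

int main(void)
{
	unsigned long dynticks = 4;	/* even: CPU in dyntick-idle */
	unsigned long snap = dynticks;	/* phase 1: RCU_SAVE_DYNTICK */

	/* phase 2: RCU_FORCE_QS recheck */
	if ((snap & 0x1) == 0 || dynticks != snap)
		printf("quiescent state inferred for this CPU\n");
	return 0;
}
#endif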
1943
7fdefc10
PM
1944/*
1945 * Clean up after the old grace period.
1946 */
4cdfc175 1947static void rcu_gp_cleanup(struct rcu_state *rsp)
7fdefc10
PM
1948{
1949 unsigned long gp_duration;
48a7639c 1950 bool needgp = false;
dae6e64d 1951 int nocb = 0;
7fdefc10
PM
1952 struct rcu_data *rdp;
1953 struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76 1954
7d0ae808 1955 WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10 1956 raw_spin_lock_irq(&rnp->lock);
6303b9c8 1957 smp_mb__after_unlock_lock();
7fdefc10
PM
1958 gp_duration = jiffies - rsp->gp_start;
1959 if (gp_duration > rsp->gp_max)
1960 rsp->gp_max = gp_duration;
b3dbec76 1961
7fdefc10
PM
1962 /*
1963 * We know the grace period is complete, but to everyone else
1964 * it appears to still be ongoing. But it is also the case
1965 * that to everyone else it looks like there is nothing that
1966 * they can do to advance the grace period. It is therefore
1967 * safe for us to drop the lock in order to mark the grace
1968 * period as completed in all of the rcu_node structures.
7fdefc10 1969 */
5d4b8659 1970 raw_spin_unlock_irq(&rnp->lock);
b3dbec76 1971
5d4b8659
PM
1972 /*
1973 * Propagate new ->completed value to rcu_node structures so
1974 * that other CPUs don't have to wait until the start of the next
1975 * grace period to process their callbacks. This also avoids
1976 * some nasty RCU grace-period initialization races by forcing
1977 * the end of the current grace period to be completely recorded in
1978 * all of the rcu_node structures before the beginning of the next
1979 * grace period is recorded in any of the rcu_node structures.
1980 */
1981 rcu_for_each_node_breadth_first(rsp, rnp) {
755609a9 1982 raw_spin_lock_irq(&rnp->lock);
6303b9c8 1983 smp_mb__after_unlock_lock();
5c60d25f
PM
1984 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
1985 WARN_ON_ONCE(rnp->qsmask);
7d0ae808 1986 WRITE_ONCE(rnp->completed, rsp->gpnum);
b11cc576
PM
1987 rdp = this_cpu_ptr(rsp->rda);
1988 if (rnp == rdp->mynode)
48a7639c 1989 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
78e4bc34 1990 /* smp_mb() provided by prior unlock-lock pair. */
0446be48 1991 nocb += rcu_future_gp_cleanup(rsp, rnp);
5d4b8659 1992 raw_spin_unlock_irq(&rnp->lock);
bde6c3aa 1993 cond_resched_rcu_qs();
7d0ae808 1994 WRITE_ONCE(rsp->gp_activity, jiffies);
0f41c0dd 1995 rcu_gp_slow(rsp, gp_cleanup_delay);
7fdefc10 1996 }
5d4b8659
PM
1997 rnp = rcu_get_root(rsp);
1998 raw_spin_lock_irq(&rnp->lock);
765a3f4f 1999 smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
dae6e64d 2000 rcu_nocb_gp_set(rnp, nocb);
7fdefc10 2001
765a3f4f 2002 /* Declare grace period done. */
7d0ae808 2003 WRITE_ONCE(rsp->completed, rsp->gpnum);
f7f7bac9 2004 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
7fdefc10 2005 rsp->fqs_state = RCU_GP_IDLE;
5d4b8659 2006 rdp = this_cpu_ptr(rsp->rda);
48a7639c
PM
2007 /* Advance CBs to reduce false positives below. */
2008 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
2009 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
7d0ae808 2010 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
bb311ecc 2011 trace_rcu_grace_period(rsp->name,
7d0ae808 2012 READ_ONCE(rsp->gpnum),
bb311ecc
PM
2013 TPS("newreq"));
2014 }
7fdefc10 2015 raw_spin_unlock_irq(&rnp->lock);
7fdefc10
PM
2016}
2017
2018/*
2019 * Body of kthread that handles grace periods.
2020 */
2021static int __noreturn rcu_gp_kthread(void *arg)
2022{
4cdfc175 2023 int fqs_state;
88d6df61 2024 int gf;
d40011f6 2025 unsigned long j;
4cdfc175 2026 int ret;
7fdefc10
PM
2027 struct rcu_state *rsp = arg;
2028 struct rcu_node *rnp = rcu_get_root(rsp);
2029
5871968d 2030 rcu_bind_gp_kthread();
7fdefc10
PM
2031 for (;;) {
2032
2033 /* Handle grace-period start. */
2034 for (;;) {
63c4db78 2035 trace_rcu_grace_period(rsp->name,
7d0ae808 2036 READ_ONCE(rsp->gpnum),
63c4db78 2037 TPS("reqwait"));
afea227f 2038 rsp->gp_state = RCU_GP_WAIT_GPS;
4cdfc175 2039 wait_event_interruptible(rsp->gp_wq,
7d0ae808 2040 READ_ONCE(rsp->gp_flags) &
4cdfc175 2041 RCU_GP_FLAG_INIT);
319362c9 2042 rsp->gp_state = RCU_GP_DONE_GPS;
78e4bc34 2043 /* Locking provides needed memory barrier. */
f7be8209 2044 if (rcu_gp_init(rsp))
7fdefc10 2045 break;
bde6c3aa 2046 cond_resched_rcu_qs();
7d0ae808 2047 WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd 2048 WARN_ON(signal_pending(current));
63c4db78 2049 trace_rcu_grace_period(rsp->name,
7d0ae808 2050 READ_ONCE(rsp->gpnum),
63c4db78 2051 TPS("reqwaitsig"));
7fdefc10 2052 }
cabc49c1 2053
4cdfc175
PM
2054 /* Handle quiescent-state forcing. */
2055 fqs_state = RCU_SAVE_DYNTICK;
d40011f6
PM
2056 j = jiffies_till_first_fqs;
2057 if (j > HZ) {
2058 j = HZ;
2059 jiffies_till_first_fqs = HZ;
2060 }
88d6df61 2061 ret = 0;
cabc49c1 2062 for (;;) {
88d6df61
PM
2063 if (!ret)
2064 rsp->jiffies_force_qs = jiffies + j;
63c4db78 2065 trace_rcu_grace_period(rsp->name,
7d0ae808 2066 READ_ONCE(rsp->gpnum),
63c4db78 2067 TPS("fqswait"));
afea227f 2068 rsp->gp_state = RCU_GP_WAIT_FQS;
4cdfc175 2069 ret = wait_event_interruptible_timeout(rsp->gp_wq,
7d0ae808 2070 ((gf = READ_ONCE(rsp->gp_flags)) &
88d6df61 2071 RCU_GP_FLAG_FQS) ||
7d0ae808 2072 (!READ_ONCE(rnp->qsmask) &&
4cdfc175 2073 !rcu_preempt_blocked_readers_cgp(rnp)),
d40011f6 2074 j);
319362c9 2075 rsp->gp_state = RCU_GP_DONE_FQS;
78e4bc34 2076 /* Locking provides needed memory barriers. */
4cdfc175 2077 /* If grace period done, leave loop. */
7d0ae808 2078 if (!READ_ONCE(rnp->qsmask) &&
4cdfc175 2079 !rcu_preempt_blocked_readers_cgp(rnp))
cabc49c1 2080 break;
4cdfc175 2081 /* If time for quiescent-state forcing, do it. */
88d6df61
PM
2082 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2083 (gf & RCU_GP_FLAG_FQS)) {
63c4db78 2084 trace_rcu_grace_period(rsp->name,
7d0ae808 2085 READ_ONCE(rsp->gpnum),
63c4db78 2086 TPS("fqsstart"));
4cdfc175 2087 fqs_state = rcu_gp_fqs(rsp, fqs_state);
63c4db78 2088 trace_rcu_grace_period(rsp->name,
7d0ae808 2089 READ_ONCE(rsp->gpnum),
63c4db78 2090 TPS("fqsend"));
bde6c3aa 2091 cond_resched_rcu_qs();
7d0ae808 2092 WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175
PM
2093 } else {
2094 /* Deal with stray signal. */
bde6c3aa 2095 cond_resched_rcu_qs();
7d0ae808 2096 WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd 2097 WARN_ON(signal_pending(current));
63c4db78 2098 trace_rcu_grace_period(rsp->name,
7d0ae808 2099 READ_ONCE(rsp->gpnum),
63c4db78 2100 TPS("fqswaitsig"));
4cdfc175 2101 }
d40011f6
PM
2102 j = jiffies_till_next_fqs;
2103 if (j > HZ) {
2104 j = HZ;
2105 jiffies_till_next_fqs = HZ;
2106 } else if (j < 1) {
2107 j = 1;
2108 jiffies_till_next_fqs = 1;
2109 }
cabc49c1 2110 }
4cdfc175
PM
2111
2112 /* Handle grace-period end. */
319362c9 2113 rsp->gp_state = RCU_GP_CLEANUP;
4cdfc175 2114 rcu_gp_cleanup(rsp);
319362c9 2115 rsp->gp_state = RCU_GP_CLEANED;
b3dbec76 2116 }
b3dbec76
PM
2117}
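/*
 * Illustrative sketch, not part of the original source: the kthread's
 * control flow above, stripped to a skeleton.  Every function here is a
 * hypothetical stub standing in for the wait_event_*() calls and the
 * rcu_gp_*() helpers:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdbool.h>
#include <stdio.h>

static bool wait_for_init(void) { return true; }	/* RCU_GP_FLAG_INIT */
static bool gp_init(void) { return true; }
static bool gp_done(void) { static int n; return ++n > 1; }
static void wait_timeout(void) { }			/* fqs interval */
static bool fqs_due(void) { return true; }
static void do_fqs(void) { printf("forcing pass\n"); }
static void gp_cleanup(void) { printf("cleanup\n"); }

int main(void)
{
	/* One trip around the kthread's outer loop: */
	while (!wait_for_init() || !gp_init())
		;				/* spurious wakeup: retry */
	while (!gp_done()) {			/* quiescent-state forcing */
		wait_timeout();
		if (fqs_due())
			do_fqs();
	}
	gp_cleanup();				/* grace period ends */
	return 0;
}
#endif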
2118
64db4cff
PM
2119/*
2120 * Start a new RCU grace period if warranted, re-initializing the hierarchy
2121 * in preparation for detecting the next grace period. The caller must hold
b8462084 2122 * the root node's ->lock and hard irqs must be disabled.
e5601400
PM
2123 *
2124 * Note that it is legal for a dying CPU (which is marked as offline) to
2125 * invoke this function. This can happen when the dying CPU reports its
2126 * quiescent state.
48a7639c
PM
2127 *
2128 * Returns true if the grace-period kthread must be awakened.
64db4cff 2129 */
48a7639c 2130static bool
910ee45d
PM
2131rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2132 struct rcu_data *rdp)
64db4cff 2133{
b8462084 2134 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
afe24b12 2135 /*
b3dbec76 2136 * Either we have not yet spawned the grace-period
62da1921
PM
2137 * task, this CPU does not need another grace period,
2138 * or a grace period is already in progress.
b3dbec76 2139 * Either way, don't start a new grace period.
afe24b12 2140 */
48a7639c 2141 return false;
afe24b12 2142 }
7d0ae808
PM
2143 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2144 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
bb311ecc 2145 TPS("newreq"));
62da1921 2146
016a8d5b
SR
2147 /*
2148 * We can't do wakeups while holding the rnp->lock, as that
1eafd31c 2149 * could cause possible deadlocks with the rq->lock. Defer
48a7639c 2150 * the wakeup to our caller.
016a8d5b 2151 */
48a7639c 2152 return true;
64db4cff
PM
2153}
2154
910ee45d
PM
2155/*
2156 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
2157 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it
2158 * is invoked indirectly from rcu_advance_cbs(), which would result in
2159 * endless recursion -- or would do so if it wasn't for the self-deadlock
2160 * that is encountered beforehand.
48a7639c
PM
2161 *
2162 * Returns true if the grace-period kthread needs to be awakened.
910ee45d 2163 */
48a7639c 2164static bool rcu_start_gp(struct rcu_state *rsp)
910ee45d
PM
2165{
2166 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2167 struct rcu_node *rnp = rcu_get_root(rsp);
48a7639c 2168 bool ret = false;
910ee45d
PM
2169
2170 /*
2171 * If there is no grace period in progress right now, any
2172 * callbacks we have up to this point will be satisfied by the
2173 * next grace period. Also, advancing the callbacks reduces the
2174 * probability of false positives from cpu_needs_another_gp()
2175 * resulting in pointless grace periods. So, advance callbacks
2176 * then start the grace period!
2177 */
48a7639c
PM
2178 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2179 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2180 return ret;
910ee45d
PM
2181}
2182
f41d911f 2183/*
d3f6bad3
PM
2184 * Report a full set of quiescent states to the specified rcu_state
2185 * data structure. This involves cleaning up after the prior grace
2186 * period and letting rcu_start_gp() start up the next grace period
b8462084
PM
2187 * if one is needed. Note that the caller must hold rnp->lock, which
2188 * is released before return.
f41d911f 2189 */
d3f6bad3 2190static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
fc2219d4 2191 __releases(rcu_get_root(rsp)->lock)
f41d911f 2192{
fc2219d4 2193 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
cd73ca21 2194 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
cabc49c1 2195 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2aa792e6 2196 rcu_gp_kthread_wake(rsp);
f41d911f
PM
2197}
2198
64db4cff 2199/*
d3f6bad3
PM
2200 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2201 * Allows quiescent states for a group of CPUs to be reported at one go
2202 * to the specified rcu_node structure, though all the CPUs in the group
654e9533
PM
2203 * must be represented by the same rcu_node structure (which need not be a
2204 * leaf rcu_node structure, though it often will be). The gps parameter
2205 * is the grace-period snapshot, which means that the quiescent states
2206 * are valid only if rnp->gpnum is equal to gps. That structure's lock
2207 * must be held upon entry, and it is released before return.
64db4cff
PM
2208 */
2209static void
d3f6bad3 2210rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
654e9533 2211 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
64db4cff
PM
2212 __releases(rnp->lock)
2213{
654e9533 2214 unsigned long oldmask = 0;
28ecd580
PM
2215 struct rcu_node *rnp_c;
2216
64db4cff
PM
2217 /* Walk up the rcu_node hierarchy. */
2218 for (;;) {
654e9533 2219 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
64db4cff 2220
654e9533
PM
2221 /*
2222 * Our bit has already been cleared, or the
2223 * relevant grace period is already over, so done.
2224 */
1304afb2 2225 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2226 return;
2227 }
654e9533 2228 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
64db4cff 2229 rnp->qsmask &= ~mask;
d4c08f2a
PM
2230 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2231 mask, rnp->qsmask, rnp->level,
2232 rnp->grplo, rnp->grphi,
2233 !!rnp->gp_tasks);
27f4d280 2234 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
64db4cff
PM
2235
2236 /* Other bits still set at this level, so done. */
1304afb2 2237 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2238 return;
2239 }
2240 mask = rnp->grpmask;
2241 if (rnp->parent == NULL) {
2242
2243 /* No more levels. Exit loop holding root lock. */
2244
2245 break;
2246 }
1304afb2 2247 raw_spin_unlock_irqrestore(&rnp->lock, flags);
28ecd580 2248 rnp_c = rnp;
64db4cff 2249 rnp = rnp->parent;
1304afb2 2250 raw_spin_lock_irqsave(&rnp->lock, flags);
6303b9c8 2251 smp_mb__after_unlock_lock();
654e9533 2252 oldmask = rnp_c->qsmask;
64db4cff
PM
2253 }
2254
2255 /*
2256 * Get here if we are the last CPU to pass through a quiescent
d3f6bad3 2257 * state for this grace period. Invoke rcu_report_qs_rsp()
f41d911f 2258 * to clean up and start the next grace period if one is needed.
64db4cff 2259 */
d3f6bad3 2260 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
64db4cff
PM
2261}
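/*
 * Illustrative sketch, not part of the original source: the upward walk
 * in miniature.  Each level keeps a bitmask of children still owing a
 * quiescent state; clearing the last bit at one level propagates the
 * report one level up.  Hypothetical two-level tree:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

int main(void)
{
	unsigned long leaf_qsmask = 0x3;	/* two CPUs outstanding */
	unsigned long root_qsmask = 0x1;	/* one leaf outstanding */

	leaf_qsmask &= ~0x1UL;			/* CPU 0 reports... */
	if (leaf_qsmask)
		printf("leaf still waiting\n");	/* ...walk stops here */

	leaf_qsmask &= ~0x2UL;			/* CPU 1 reports */
	if (!leaf_qsmask) {
		root_qsmask &= ~0x1UL;		/* propagate to the root */
		if (!root_qsmask)
			printf("last QS: grace period can end\n");
	}
	return 0;
}
#endif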
2262
cc99a310
PM
2263/*
2264 * Record a quiescent state for all tasks that were previously queued
2265 * on the specified rcu_node structure and that were blocking the current
2266 * RCU grace period. The caller must hold the specified rnp->lock with
2267 * irqs disabled, and this lock is released upon return, but irqs remain
2268 * disabled.
2269 */
0aa04b05 2270static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
cc99a310
PM
2271 struct rcu_node *rnp, unsigned long flags)
2272 __releases(rnp->lock)
2273{
654e9533 2274 unsigned long gps;
cc99a310
PM
2275 unsigned long mask;
2276 struct rcu_node *rnp_p;
2277
a77da14c
PM
2278 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2279 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
cc99a310
PM
2280 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2281 return; /* Still need more quiescent states! */
2282 }
2283
2284 rnp_p = rnp->parent;
2285 if (rnp_p == NULL) {
2286 /*
a77da14c
PM
2287 * Only one rcu_node structure in the tree, so don't
2288 * try to report up to its nonexistent parent!
cc99a310
PM
2289 */
2290 rcu_report_qs_rsp(rsp, flags);
2291 return;
2292 }
2293
654e9533
PM
2294 /* Report up the rest of the hierarchy, tracking current ->gpnum. */
2295 gps = rnp->gpnum;
cc99a310
PM
2296 mask = rnp->grpmask;
2297 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2298 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
2299 smp_mb__after_unlock_lock();
654e9533 2300 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
cc99a310
PM
2301}
2302
64db4cff 2303/*
d3f6bad3
PM
2304 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2305 * structure. This must be either called from the specified CPU, or
2306 * called when the specified CPU is known to be offline (and when it is
2307 * also known that no other CPU is concurrently trying to help the offline
2308 * CPU). The lastcomp argument is used to make sure we are still in the
2309 * grace period of interest. We don't want to end the current grace period
2310 * based on quiescent states detected in an earlier grace period!
64db4cff
PM
2311 */
2312static void
d7d6a11e 2313rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
64db4cff
PM
2314{
2315 unsigned long flags;
2316 unsigned long mask;
48a7639c 2317 bool needwake;
64db4cff
PM
2318 struct rcu_node *rnp;
2319
2320 rnp = rdp->mynode;
1304afb2 2321 raw_spin_lock_irqsave(&rnp->lock, flags);
6303b9c8 2322 smp_mb__after_unlock_lock();
5cd37193
PM
2323 if ((rdp->passed_quiesce == 0 &&
2324 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2325 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2326 rdp->gpwrap) {
64db4cff
PM
2327
2328 /*
e4cc1f22
PM
2329 * The grace period in which this quiescent state was
2330 * recorded has ended, so don't report it upwards.
2331 * We will instead need a new quiescent state that lies
2332 * within the current grace period.
64db4cff 2333 */
e4cc1f22 2334 rdp->passed_quiesce = 0; /* need qs for new gp. */
5cd37193 2335 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1304afb2 2336 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2337 return;
2338 }
2339 mask = rdp->grpmask;
2340 if ((rnp->qsmask & mask) == 0) {
1304afb2 2341 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2342 } else {
2343 rdp->qs_pending = 0;
2344
2345 /*
2346 * This GP can't end until cpu checks in, so all of our
2347 * callbacks can be processed during the next GP.
2348 */
48a7639c 2349 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
64db4cff 2350
654e9533
PM
2351 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2352 /* ^^^ Released rnp->lock */
48a7639c
PM
2353 if (needwake)
2354 rcu_gp_kthread_wake(rsp);
64db4cff
PM
2355 }
2356}
2357
2358/*
2359 * Check to see if there is a new grace period of which this CPU
2360 * is not yet aware, and if so, set up local rcu_data state for it.
2361 * Otherwise, see if this CPU has just passed through its first
2362 * quiescent state for this grace period, and record that fact if so.
2363 */
2364static void
2365rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2366{
05eb552b
PM
2367 /* Check for grace-period ends and beginnings. */
2368 note_gp_changes(rsp, rdp);
64db4cff
PM
2369
2370 /*
2371 * Does this CPU still need to do its part for current grace period?
2372 * If no, return and let the other CPUs do their part as well.
2373 */
2374 if (!rdp->qs_pending)
2375 return;
2376
2377 /*
2378 * Was there a quiescent state since the beginning of the grace
2379 * period? If no, then exit and wait for the next call.
2380 */
5cd37193
PM
2381 if (!rdp->passed_quiesce &&
2382 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
64db4cff
PM
2383 return;
2384
d3f6bad3
PM
2385 /*
2386 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2387 * judge of that).
2388 */
d7d6a11e 2389 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
64db4cff
PM
2390}
2391
e74f4c45 2392/*
b1420f1c
PM
2393 * Send the specified CPU's RCU callbacks to the orphanage. The
2394 * specified CPU must be offline, and the caller must hold the
7b2e6011 2395 * ->orphan_lock.
e74f4c45 2396 */
b1420f1c
PM
2397static void
2398rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2399 struct rcu_node *rnp, struct rcu_data *rdp)
e74f4c45 2400{
3fbfbf7a 2401 /* No-CBs CPUs do not have orphanable callbacks. */
ea46351c 2402 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
3fbfbf7a
PM
2403 return;
2404
b1420f1c
PM
2405 /*
2406 * Orphan the callbacks. First adjust the counts. This is safe
abfd6e58
PM
2407 * because _rcu_barrier() excludes CPU-hotplug operations, so it
2408 * cannot be running now. Thus no memory barrier is required.
b1420f1c 2409 */
a50c3af9 2410 if (rdp->nxtlist != NULL) {
b1420f1c
PM
2411 rsp->qlen_lazy += rdp->qlen_lazy;
2412 rsp->qlen += rdp->qlen;
2413 rdp->n_cbs_orphaned += rdp->qlen;
a50c3af9 2414 rdp->qlen_lazy = 0;
7d0ae808 2415 WRITE_ONCE(rdp->qlen, 0);
a50c3af9
PM
2416 }
2417
2418 /*
b1420f1c
PM
2419 * Next, move those callbacks still needing a grace period to
2420 * the orphanage, where some other CPU will pick them up.
2421 * Some of the callbacks might have gone partway through a grace
2422 * period, but that is too bad. They get to start over because we
2423 * cannot assume that grace periods are synchronized across CPUs.
2424 * We don't bother updating the ->nxttail[] array yet, instead
2425 * we just reset the whole thing later on.
a50c3af9 2426 */
b1420f1c
PM
2427 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2428 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2429 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2430 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
a50c3af9
PM
2431 }
2432
2433 /*
b1420f1c
PM
2434 * Then move the ready-to-invoke callbacks to the orphanage,
2435 * where some other CPU will pick them up. These will not be
2436 * required to pass through another grace period: They are done.
a50c3af9 2437 */
e5601400 2438 if (rdp->nxtlist != NULL) {
b1420f1c
PM
2439 *rsp->orphan_donetail = rdp->nxtlist;
2440 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
e5601400 2441 }
e74f4c45 2442
b33078b6
PM
2443 /*
2444 * Finally, initialize the rcu_data structure's list to empty and
2445 * disallow further callbacks on this CPU.
2446 */
3f5d3ea6 2447 init_callback_list(rdp);
b33078b6 2448 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
b1420f1c
PM
2449}
2450
2451/*
2452 * Adopt the RCU callbacks from the specified rcu_state structure's
7b2e6011 2453 * orphanage. The caller must hold the ->orphan_lock.
b1420f1c 2454 */
96d3fd0d 2455static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
b1420f1c
PM
2456{
2457 int i;
fa07a58f 2458 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
b1420f1c 2459
3fbfbf7a 2460 /* No-CBs CPUs are handled specially. */
ea46351c
PM
2461 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2462 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
3fbfbf7a
PM
2463 return;
2464
b1420f1c
PM
2465 /* Do the accounting first. */
2466 rdp->qlen_lazy += rsp->qlen_lazy;
2467 rdp->qlen += rsp->qlen;
2468 rdp->n_cbs_adopted += rsp->qlen;
8f5af6f1
PM
2469 if (rsp->qlen_lazy != rsp->qlen)
2470 rcu_idle_count_callbacks_posted();
b1420f1c
PM
2471 rsp->qlen_lazy = 0;
2472 rsp->qlen = 0;
2473
2474 /*
2475 * We do not need a memory barrier here because the only way we
2476 * can get here if there is an rcu_barrier() in flight is if
2477 * we are the task doing the rcu_barrier().
2478 */
2479
2480 /* First adopt the ready-to-invoke callbacks. */
2481 if (rsp->orphan_donelist != NULL) {
2482 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2483 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2484 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2485 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2486 rdp->nxttail[i] = rsp->orphan_donetail;
2487 rsp->orphan_donelist = NULL;
2488 rsp->orphan_donetail = &rsp->orphan_donelist;
2489 }
2490
2491 /* And then adopt the callbacks that still need a grace period. */
2492 if (rsp->orphan_nxtlist != NULL) {
2493 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2494 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2495 rsp->orphan_nxtlist = NULL;
2496 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2497 }
2498}
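/*
 * Illustrative sketch, not part of the original source: splicing one
 * tail-pointer list onto another, the core move in the orphanage code
 * above.  The "src"/"dst" names are hypothetical:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>

struct head { int id; struct head *next; };

int main(void)
{
	struct head a = { 1, NULL }, b = { 2, NULL };
	struct head *src_list = &a, *dst_list = &b;
	struct head **src_tail = &a.next, **dst_tail = &b.next;
	struct head *p;

	*dst_tail = src_list;	/* append the source list... */
	dst_tail = src_tail;	/* ...and adopt its tail pointer */
	src_list = NULL;

	for (p = dst_list; p; p = p->next)
		printf("callback %d\n", p->id);	/* 2, then 1 */
	printf("source emptied: %d\n", src_list == NULL);
	return 0;
}
#endif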
2499
2500/*
2501 * Trace the fact that this CPU is going offline.
2502 */
2503static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2504{
2505 RCU_TRACE(unsigned long mask);
2506 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2507 RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2508
ea46351c
PM
2509 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2510 return;
2511
b1420f1c 2512 RCU_TRACE(mask = rdp->grpmask);
e5601400
PM
2513 trace_rcu_grace_period(rsp->name,
2514 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
f7f7bac9 2515 TPS("cpuofl"));
64db4cff
PM
2516}
2517
8af3a5e7
PM
2518/*
2519 * All CPUs for the specified rcu_node structure have gone offline,
2520 * and all tasks that were preempted within an RCU read-side critical
2521 * section while running on one of those CPUs have since exited their RCU
2522 * read-side critical section. Some other CPU is reporting this fact with
2523 * the specified rcu_node structure's ->lock held and interrupts disabled.
2524 * This function therefore goes up the tree of rcu_node structures,
2525 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2526 * the leaf rcu_node structure's ->qsmaskinit field has already been
2527 * updated.
2528 *
2529 * This function does check that the specified rcu_node structure has
2530 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2531 * prematurely. That said, invoking it after the fact will cost you
2532 * a needless lock acquisition. So once it has done its work, don't
2533 * invoke it again.
2534 */
2535static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2536{
2537 long mask;
2538 struct rcu_node *rnp = rnp_leaf;
2539
ea46351c
PM
2540 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2541 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
8af3a5e7
PM
2542 return;
2543 for (;;) {
2544 mask = rnp->grpmask;
2545 rnp = rnp->parent;
2546 if (!rnp)
2547 break;
2548 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2549 smp_mb__after_unlock_lock(); /* GP memory ordering. */
2550 rnp->qsmaskinit &= ~mask;
0aa04b05 2551 rnp->qsmask &= ~mask;
8af3a5e7
PM
2552 if (rnp->qsmaskinit) {
2553 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2554 return;
2555 }
2556 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2557 }
2558}
2559
88428cc5
PM
2560/*
2561 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
2562 * function. We now remove it from the rcu_node tree's ->qsmaskinit
2563 * bit masks.
2564 */
2565static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
2566{
2567 unsigned long flags;
2568 unsigned long mask;
2569 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2570 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2571
ea46351c
PM
2572 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2573 return;
2574
88428cc5
PM
2575 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2576 mask = rdp->grpmask;
2577 raw_spin_lock_irqsave(&rnp->lock, flags);
2578 smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
2579 rnp->qsmaskinitnext &= ~mask;
2580 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2581}
2582
64db4cff 2583/*
e5601400 2584 * The CPU has been completely removed, and some other CPU is reporting
b1420f1c
PM
2585 * this fact from process context. Do the remainder of the cleanup,
2586 * including orphaning the outgoing CPU's RCU callbacks, and also
1331e7a1
PM
2587 * adopting them. There can only be one CPU hotplug operation at a time,
2588 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
64db4cff 2589 */
e5601400 2590static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
64db4cff 2591{
2036d94a 2592 unsigned long flags;
e5601400 2593 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
b1420f1c 2594 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
e5601400 2595
ea46351c
PM
2596 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2597 return;
2598
2036d94a 2599 /* Adjust any no-longer-needed kthreads. */
5d01bbd1 2600 rcu_boost_kthread_setaffinity(rnp, -1);
2036d94a 2601
b1420f1c 2602 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
78043c46 2603 raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
b1420f1c 2604 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
96d3fd0d 2605 rcu_adopt_orphan_cbs(rsp, flags);
a8f4cbad 2606 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
b1420f1c 2607
cf01537e
PM
2608 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2609 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2610 cpu, rdp->qlen, rdp->nxtlist);
64db4cff
PM
2611}
2612
64db4cff
PM
2613/*
2614 * Invoke any RCU callbacks that have made it to the end of their grace
2615 * period. Throttle as specified by rdp->blimit.
2616 */
37c72e56 2617static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
64db4cff
PM
2618{
2619 unsigned long flags;
2620 struct rcu_head *next, *list, **tail;
878d7439
ED
2621 long bl, count, count_lazy;
2622 int i;
64db4cff 2623
dc35c893 2624 /* If no callbacks are ready, just return. */
29c00b4a 2625 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
486e2593 2626 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
7d0ae808 2627 trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
4968c300
PM
2628 need_resched(), is_idle_task(current),
2629 rcu_is_callbacks_kthread());
64db4cff 2630 return;
29c00b4a 2631 }
64db4cff
PM
2632
2633 /*
2634 * Extract the list of ready callbacks, disabling to prevent
2635 * races with call_rcu() from interrupt handlers.
2636 */
2637 local_irq_save(flags);
8146c4e2 2638 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
29c00b4a 2639 bl = rdp->blimit;
486e2593 2640 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
64db4cff
PM
2641 list = rdp->nxtlist;
2642 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2643 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
2644 tail = rdp->nxttail[RCU_DONE_TAIL];
b41772ab
PM
2645 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2646 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2647 rdp->nxttail[i] = &rdp->nxtlist;
64db4cff
PM
2648 local_irq_restore(flags);
2649
2650 /* Invoke callbacks. */
486e2593 2651 count = count_lazy = 0;
64db4cff
PM
2652 while (list) {
2653 next = list->next;
2654 prefetch(next);
551d55a9 2655 debug_rcu_head_unqueue(list);
486e2593
PM
2656 if (__rcu_reclaim(rsp->name, list))
2657 count_lazy++;
64db4cff 2658 list = next;
dff1672d
PM
2659 /* Stop only if limit reached and CPU has something to do. */
2660 if (++count >= bl &&
2661 (need_resched() ||
2662 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
64db4cff
PM
2663 break;
2664 }
2665
2666 local_irq_save(flags);
4968c300
PM
2667 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2668 is_idle_task(current),
2669 rcu_is_callbacks_kthread());
64db4cff
PM
2670
2671 /* Update count, and requeue any remaining callbacks. */
64db4cff
PM
2672 if (list != NULL) {
2673 *tail = rdp->nxtlist;
2674 rdp->nxtlist = list;
b41772ab
PM
2675 for (i = 0; i < RCU_NEXT_SIZE; i++)
2676 if (&rdp->nxtlist == rdp->nxttail[i])
2677 rdp->nxttail[i] = tail;
64db4cff
PM
2678 else
2679 break;
2680 }
b1420f1c
PM
2681 smp_mb(); /* List handling before counting for rcu_barrier(). */
2682 rdp->qlen_lazy -= count_lazy;
7d0ae808 2683 WRITE_ONCE(rdp->qlen, rdp->qlen - count);
b1420f1c 2684 rdp->n_cbs_invoked += count;
64db4cff
PM
2685
2686 /* Reinstate batch limit if we have worked down the excess. */
2687 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2688 rdp->blimit = blimit;
2689
37c72e56
PM
2690 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2691 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2692 rdp->qlen_last_fqs_check = 0;
2693 rdp->n_force_qs_snap = rsp->n_force_qs;
2694 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2695 rdp->qlen_last_fqs_check = rdp->qlen;
cfca9279 2696 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
37c72e56 2697
64db4cff
PM
2698 local_irq_restore(flags);
2699
e0f23060 2700 /* Re-invoke RCU core processing if there are callbacks remaining. */
64db4cff 2701 if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899 2702 invoke_rcu_core();
64db4cff
PM
2703}
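/*
 * Illustrative sketch, not part of the original source: the batch
 * throttling above in isolation.  At most "blimit" callbacks run per
 * pass, and a limit previously raised to LONG_MAX (see __call_rcu_core())
 * is reinstated once the backlog drains below "qlowmark".  The numeric
 * values are invented, not the kernel's tunables:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	long qlen = 25, qlowmark = 100;
	long blimit = LONG_MAX;		/* after an earlier overload kick */
	long count = 0;

	while (qlen > 0 && count < blimit) {
		qlen--;			/* invoke one callback */
		count++;
	}
	if (blimit == LONG_MAX && qlen <= qlowmark)
		blimit = 10;		/* excess worked down: re-throttle */
	printf("invoked %ld; blimit now %ld\n", count, blimit);
	return 0;
}
#endif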
2704
2705/*
2706 * Check to see if this CPU is in a non-context-switch quiescent state
2707 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
e0f23060 2708 * Also schedule RCU core processing.
64db4cff 2709 *
9b2e4f18 2710 * This function must be called from hardirq context. It is normally
64db4cff
PM
2711 * invoked from the scheduling-clock interrupt. If rcu_pending returns
2712 * false, there is no point in invoking rcu_check_callbacks().
2713 */
c3377c2d 2714void rcu_check_callbacks(int user)
64db4cff 2715{
f7f7bac9 2716 trace_rcu_utilization(TPS("Start scheduler-tick"));
a858af28 2717 increment_cpu_stall_ticks();
9b2e4f18 2718 if (user || rcu_is_cpu_rrupt_from_idle()) {
64db4cff
PM
2719
2720 /*
2721 * Get here if this CPU took its interrupt from user
2722 * mode or from the idle loop, and if this is not a
2723 * nested interrupt. In this case, the CPU is in
d6714c22 2724 * a quiescent state, so note it.
64db4cff
PM
2725 *
2726 * No memory barrier is required here because both
d6714c22
PM
2727 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2728 * variables that other CPUs neither access nor modify,
2729 * at least not while the corresponding CPU is online.
64db4cff
PM
2730 */
2731
284a8c93
PM
2732 rcu_sched_qs();
2733 rcu_bh_qs();
64db4cff
PM
2734
2735 } else if (!in_softirq()) {
2736
2737 /*
2738 * Get here if this CPU did not take its interrupt from
2739 * softirq, in other words, if it is not interrupting
2740 * a rcu_bh read-side critical section. This is an _bh
d6714c22 2741 * critical section, so note it.
64db4cff
PM
2742 */
2743
284a8c93 2744 rcu_bh_qs();
64db4cff 2745 }
86aea0e6 2746 rcu_preempt_check_callbacks();
e3950ecd 2747 if (rcu_pending())
a46e0899 2748 invoke_rcu_core();
8315f422
PM
2749 if (user)
2750 rcu_note_voluntary_context_switch(current);
f7f7bac9 2751 trace_rcu_utilization(TPS("End scheduler-tick"));
64db4cff
PM
2752}
2753
64db4cff
PM
2754/*
2755 * Scan the leaf rcu_node structures, processing dyntick state for any that
2756 * have not yet encountered a quiescent state, using the function specified.
27f4d280
PM
2757 * Also initiate boosting for any threads blocked on the root rcu_node.
2758 *
ee47eb9f 2759 * The caller must have suppressed start of new grace periods.
64db4cff 2760 */
217af2a2
PM
2761static void force_qs_rnp(struct rcu_state *rsp,
2762 int (*f)(struct rcu_data *rsp, bool *isidle,
2763 unsigned long *maxj),
2764 bool *isidle, unsigned long *maxj)
64db4cff
PM
2765{
2766 unsigned long bit;
2767 int cpu;
2768 unsigned long flags;
2769 unsigned long mask;
a0b6c9a7 2770 struct rcu_node *rnp;
64db4cff 2771
a0b6c9a7 2772 rcu_for_each_leaf_node(rsp, rnp) {
bde6c3aa 2773 cond_resched_rcu_qs();
64db4cff 2774 mask = 0;
1304afb2 2775 raw_spin_lock_irqsave(&rnp->lock, flags);
6303b9c8 2776 smp_mb__after_unlock_lock();
a0b6c9a7 2777 if (rnp->qsmask == 0) {
a77da14c
PM
2778 if (rcu_state_p == &rcu_sched_state ||
2779 rsp != rcu_state_p ||
2780 rcu_preempt_blocked_readers_cgp(rnp)) {
2781 /*
2782 * No point in scanning bits because they
2783 * are all zero. But we might need to
2784 * priority-boost blocked readers.
2785 */
2786 rcu_initiate_boost(rnp, flags);
2787 /* rcu_initiate_boost() releases rnp->lock */
2788 continue;
2789 }
2790 if (rnp->parent &&
2791 (rnp->parent->qsmask & rnp->grpmask)) {
2792 /*
2793 * Race between grace-period
2794 * initialization and task exiting RCU
2795 * read-side critical section: Report.
2796 */
2797 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2798 /* rcu_report_unblock_qs_rnp() releases ->lock */
2799 continue;
2800 }
64db4cff 2801 }
a0b6c9a7 2802 cpu = rnp->grplo;
64db4cff 2803 bit = 1;
a0b6c9a7 2804 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
0edd1b17 2805 if ((rnp->qsmask & bit) != 0) {
0edd1b17
PM
2806 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2807 mask |= bit;
2808 }
64db4cff 2809 }
45f014c5 2810 if (mask != 0) {
654e9533
PM
2811 /* Idle/offline CPUs, report (releases rnp->lock). */
2812 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
0aa04b05
PM
2813 } else {
2814 /* Nothing to do here, so just drop the lock. */
2815 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff 2816 }
64db4cff 2817 }
64db4cff
PM
2818}
2819
2820/*
2821 * Force quiescent states on reluctant CPUs, and also detect which
2822 * CPUs are in dyntick-idle mode.
2823 */
4cdfc175 2824static void force_quiescent_state(struct rcu_state *rsp)
64db4cff
PM
2825{
2826 unsigned long flags;
394f2769
PM
2827 bool ret;
2828 struct rcu_node *rnp;
2829 struct rcu_node *rnp_old = NULL;
2830
2831 /* Funnel through hierarchy to reduce memory contention. */
d860d403 2832 rnp = __this_cpu_read(rsp->rda->mynode);
394f2769 2833 for (; rnp != NULL; rnp = rnp->parent) {
7d0ae808 2834 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
394f2769
PM
2835 !raw_spin_trylock(&rnp->fqslock);
2836 if (rnp_old != NULL)
2837 raw_spin_unlock(&rnp_old->fqslock);
2838 if (ret) {
a792563b 2839 rsp->n_force_qs_lh++;
394f2769
PM
2840 return;
2841 }
2842 rnp_old = rnp;
2843 }
2844 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
64db4cff 2845
394f2769
PM
2846 /* Reached the root of the rcu_node tree, acquire lock. */
2847 raw_spin_lock_irqsave(&rnp_old->lock, flags);
6303b9c8 2848 smp_mb__after_unlock_lock();
394f2769 2849 raw_spin_unlock(&rnp_old->fqslock);
7d0ae808 2850 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
a792563b 2851 rsp->n_force_qs_lh++;
394f2769 2852 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
4cdfc175 2853 return; /* Someone beat us to it. */
46a1e34e 2854 }
7d0ae808 2855 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
394f2769 2856 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2aa792e6 2857 rcu_gp_kthread_wake(rsp);
64db4cff
PM
2858}
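/*
 * Illustrative sketch, not part of the original source: the
 * funnel-locking pattern above in miniature.  A contender trylocks its
 * way from its leaf toward the root, releasing the lock one level down
 * as it advances; a failed trylock means another CPU is already
 * funneling an equivalent request upward, so this one simply gives up.
 * POSIX mutexes stand in for the ->fqslock spinlocks:
 */
#if 0	/* editorial example; not compiled as part of tree.c */
#include <pthread.h>
#include <stdio.h>

#define LEVELS 3	/* [0] = leaf ... [LEVELS - 1] = root */

static pthread_mutex_t fqslock[LEVELS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER
};

int main(void)
{
	int level, held = -1;

	for (level = 0; level < LEVELS; level++) {
		int failed = pthread_mutex_trylock(&fqslock[level]) != 0;

		if (held >= 0)
			pthread_mutex_unlock(&fqslock[held]);
		if (failed) {
			printf("lost the race at level %d; give up\n", level);
			return 0;
		}
		held = level;
	}
	printf("reached the root; do the real forcing\n");
	pthread_mutex_unlock(&fqslock[held]);
	return 0;
}
#endif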
2859
64db4cff 2860/*
e0f23060
PM
2861 * This does the RCU core processing work for the specified rcu_state
2862 * and rcu_data structures. This may be called only from the CPU to
2863 * whom the rdp belongs.
64db4cff
PM
2864 */
2865static void
1bca8cf1 2866__rcu_process_callbacks(struct rcu_state *rsp)
64db4cff
PM
2867{
2868 unsigned long flags;
48a7639c 2869 bool needwake;
fa07a58f 2870 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
64db4cff 2871
2e597558
PM
2872 WARN_ON_ONCE(rdp->beenonline == 0);
2873
64db4cff
PM
2874 /* Update RCU state based on any recent quiescent states. */
2875 rcu_check_quiescent_state(rsp, rdp);
2876
2877 /* Does this CPU require a not-yet-started grace period? */
dc35c893 2878 local_irq_save(flags);
64db4cff 2879 if (cpu_needs_another_gp(rsp, rdp)) {
dc35c893 2880 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
48a7639c 2881 needwake = rcu_start_gp(rsp);
b8462084 2882 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
48a7639c
PM
2883 if (needwake)
2884 rcu_gp_kthread_wake(rsp);
dc35c893
PM
2885 } else {
2886 local_irq_restore(flags);
64db4cff
PM
2887 }
2888
2889 /* If there are callbacks ready, invoke them. */
09223371 2890 if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899 2891 invoke_rcu_callbacks(rsp, rdp);
96d3fd0d
PM
2892
2893 /* Do any needed deferred wakeups of rcuo kthreads. */
2894 do_nocb_deferred_wakeup(rdp);
09223371
SL
2895}
2896
64db4cff 2897/*
e0f23060 2898 * Do RCU core processing for the current CPU.
64db4cff 2899 */
09223371 2900static void rcu_process_callbacks(struct softirq_action *unused)
64db4cff 2901{
6ce75a23
PM
2902 struct rcu_state *rsp;
2903
bfa00b4c
PM
2904 if (cpu_is_offline(smp_processor_id()))
2905 return;
f7f7bac9 2906 trace_rcu_utilization(TPS("Start RCU core"));
6ce75a23
PM
2907 for_each_rcu_flavor(rsp)
2908 __rcu_process_callbacks(rsp);
f7f7bac9 2909 trace_rcu_utilization(TPS("End RCU core"));
64db4cff
PM
2910}
2911
a26ac245 2912/*
e0f23060
PM
2913 * Schedule RCU callback invocation. If the specified type of RCU
2914 * does not support RCU priority boosting, just do a direct call,
2915 * otherwise wake up the per-CPU kernel kthread. Note that because we
924df8a0 2916 * are running on the current CPU with softirqs disabled, the
e0f23060 2917 * rcu_cpu_kthread_task cannot disappear out from under us.
a26ac245 2918 */
a46e0899 2919static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
a26ac245 2920{
7d0ae808 2921 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
b0d30417 2922 return;
a46e0899
PM
2923 if (likely(!rsp->boost)) {
2924 rcu_do_batch(rsp, rdp);
a26ac245
PM
2925 return;
2926 }
a46e0899 2927 invoke_rcu_callbacks_kthread();
a26ac245
PM
2928}
2929
a46e0899 2930static void invoke_rcu_core(void)
09223371 2931{
b0f74036
PM
2932 if (cpu_online(smp_processor_id()))
2933 raise_softirq(RCU_SOFTIRQ);
09223371
SL
2934}
2935
29154c57
PM
2936/*
2937 * Handle any core-RCU processing required by a call_rcu() invocation.
2938 */
2939static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2940 struct rcu_head *head, unsigned long flags)
64db4cff 2941{
48a7639c
PM
2942 bool needwake;
2943
62fde6ed
PM
2944 /*
2945 * If called from an extended quiescent state, invoke the RCU
2946 * core in order to force a re-evaluation of RCU's idleness.
2947 */
9910affa 2948 if (!rcu_is_watching())
62fde6ed
PM
2949 invoke_rcu_core();
2950
a16b7a69 2951 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
29154c57 2952 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2655d57e 2953 return;
64db4cff 2954
37c72e56
PM
2955 /*
2956 * Force the grace period if too many callbacks or too long waiting.
2957 * Enforce hysteresis, and don't invoke force_quiescent_state()
2958 * if some other CPU has recently done so. Also, don't bother
2959 * invoking force_quiescent_state() if the newly enqueued callback
2960 * is the only one waiting for a grace period to complete.
2961 */
2655d57e 2962 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
b52573d2
PM
2963
2964 /* Are we ignoring a completed grace period? */
470716fc 2965 note_gp_changes(rsp, rdp);
b52573d2
PM
2966
2967 /* Start a new grace period if one not already started. */
2968 if (!rcu_gp_in_progress(rsp)) {
b52573d2
PM
2969 struct rcu_node *rnp_root = rcu_get_root(rsp);
2970
b8462084 2971 raw_spin_lock(&rnp_root->lock);
6303b9c8 2972 smp_mb__after_unlock_lock();
48a7639c 2973 needwake = rcu_start_gp(rsp);
b8462084 2974 raw_spin_unlock(&rnp_root->lock);
48a7639c
PM
2975 if (needwake)
2976 rcu_gp_kthread_wake(rsp);
b52573d2
PM
2977 } else {
2978 /* Give the grace period a kick. */
2979 rdp->blimit = LONG_MAX;
2980 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2981 *rdp->nxttail[RCU_DONE_TAIL] != head)
4cdfc175 2982 force_quiescent_state(rsp);
b52573d2
PM
2983 rdp->n_force_qs_snap = rsp->n_force_qs;
2984 rdp->qlen_last_fqs_check = rdp->qlen;
2985 }
4cdfc175 2986 }
29154c57
PM
2987}
2988
ae150184
PM
2989/*
2990 * RCU callback function to leak a callback.
2991 */
2992static void rcu_leak_callback(struct rcu_head *rhp)
2993{
2994}
2995
3fbfbf7a
PM
2996/*
2997 * Helper function for call_rcu() and friends. The cpu argument will
2998 * normally be -1, indicating "currently running CPU". It may specify
2999 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
3000 * is expected to specify a CPU.
3001 */
64db4cff
PM
3002static void
3003__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
3fbfbf7a 3004 struct rcu_state *rsp, int cpu, bool lazy)
64db4cff
PM
3005{
3006 unsigned long flags;
3007 struct rcu_data *rdp;
3008
1146edcb 3009 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
ae150184
PM
3010 if (debug_rcu_head_queue(head)) {
3011 /* Probable double call_rcu(), so leak the callback. */
7d0ae808 3012 WRITE_ONCE(head->func, rcu_leak_callback);
ae150184
PM
3013 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
3014 return;
3015 }
64db4cff
PM
3016 head->func = func;
3017 head->next = NULL;
3018
64db4cff
PM
3019 /*
3020 * Opportunistically note grace-period endings and beginnings.
3021 * Note that we might see a beginning right after we see an
3022 * end, but never vice versa, since this CPU has to pass through
3023 * a quiescent state betweentimes.
3024 */
3025 local_irq_save(flags);
394f99a9 3026 rdp = this_cpu_ptr(rsp->rda);
64db4cff
PM
3027
3028 /* Add the callback to our list. */
3fbfbf7a
PM
3029 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
3030 int offline;
3031
3032 if (cpu != -1)
3033 rdp = per_cpu_ptr(rsp->rda, cpu);
143da9c2
PM
3034 if (likely(rdp->mynode)) {
3035 /* Post-boot, so this should be for a no-CBs CPU. */
3036 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3037 WARN_ON_ONCE(offline);
 3038 			/* Offline CPU, __call_rcu() illegal, leak callback. */
3039 local_irq_restore(flags);
3040 return;
3041 }
3042 /*
3043 * Very early boot, before rcu_init(). Initialize if needed
3044 * and then drop through to queue the callback.
3045 */
3046 BUG_ON(cpu != -1);
34404ca8 3047 WARN_ON_ONCE(!rcu_is_watching());
143da9c2
PM
 3048 		if (unlikely(!rdp->nxtlist))
3049 init_default_callback_list(rdp);
0d8ee37e 3050 }
7d0ae808 3051 WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
486e2593
PM
3052 if (lazy)
3053 rdp->qlen_lazy++;
c57afe80
PM
3054 else
3055 rcu_idle_count_callbacks_posted();
b1420f1c
PM
3056 smp_mb(); /* Count before adding callback for rcu_barrier(). */
3057 *rdp->nxttail[RCU_NEXT_TAIL] = head;
3058 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2655d57e 3059
d4c08f2a
PM
3060 if (__is_kfree_rcu_offset((unsigned long)func))
3061 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
486e2593 3062 rdp->qlen_lazy, rdp->qlen);
d4c08f2a 3063 else
486e2593 3064 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
d4c08f2a 3065
29154c57
PM
3066 /* Go handle any RCU core processing required. */
3067 __call_rcu_core(rsp, rdp, head, flags);
64db4cff
PM
3068 local_irq_restore(flags);
3069}
3070
3071/*
d6714c22 3072 * Queue an RCU-sched callback for invocation after a grace period.
64db4cff 3073 */
d6714c22 3074void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
64db4cff 3075{
3fbfbf7a 3076 __call_rcu(head, func, &rcu_sched_state, -1, 0);
64db4cff 3077}
d6714c22 3078EXPORT_SYMBOL_GPL(call_rcu_sched);
64db4cff
PM
3079
3080/*
486e2593 3081 * Queue an RCU callback for invocation after a quicker grace period.
64db4cff
PM
3082 */
3083void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
3084{
3fbfbf7a 3085 __call_rcu(head, func, &rcu_bh_state, -1, 0);
64db4cff
PM
3086}
3087EXPORT_SYMBOL_GPL(call_rcu_bh);
3088
495aa969
ACB
3089/*
3090 * Queue an RCU callback for lazy invocation after a grace period.
3091 * This will likely be later named something like "call_rcu_lazy()",
3092 * but this change will require some way of tagging the lazy RCU
3093 * callbacks in the list of pending callbacks. Until then, this
3094 * function may only be called from __kfree_rcu().
3095 */
3096void kfree_call_rcu(struct rcu_head *head,
3097 void (*func)(struct rcu_head *rcu))
3098{
e534165b 3099 __call_rcu(head, func, rcu_state_p, -1, 1);
495aa969
ACB
3100}
3101EXPORT_SYMBOL_GPL(kfree_call_rcu);
3102
6d813391
PM
3103/*
3104 * Because a context switch is a grace period for RCU-sched and RCU-bh,
3105 * any blocking grace-period wait automatically implies a grace period
 3106  * if there is only one CPU online at any point in time during execution
3107 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
3108 * occasionally incorrectly indicate that there are multiple CPUs online
3109 * when there was in fact only one the whole time, as this just adds
3110 * some overhead: RCU still operates correctly.
6d813391
PM
3111 */
3112static inline int rcu_blocking_is_gp(void)
3113{
95f0c1de
PM
3114 int ret;
3115
6d813391 3116 might_sleep(); /* Check for RCU read-side critical section. */
95f0c1de
PM
3117 preempt_disable();
3118 ret = num_online_cpus() <= 1;
3119 preempt_enable();
3120 return ret;
6d813391
PM
3121}
3122
6ebb237b
PM
3123/**
3124 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
3125 *
3126 * Control will return to the caller some time after a full rcu-sched
3127 * grace period has elapsed, in other words after all currently executing
3128 * rcu-sched read-side critical sections have completed. These read-side
3129 * critical sections are delimited by rcu_read_lock_sched() and
3130 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
3131 * local_irq_disable(), and so on may be used in place of
3132 * rcu_read_lock_sched().
3133 *
3134 * This means that all preempt_disable code sequences, including NMI and
f0a0e6f2
PM
3135 * non-threaded hardware-interrupt handlers, in progress on entry will
3136 * have completed before this primitive returns. However, this does not
3137 * guarantee that softirq handlers will have completed, since in some
3138 * kernels, these handlers can run in process context, and can block.
3139 *
3140 * Note that this guarantee implies further memory-ordering guarantees.
3141 * On systems with more than one CPU, when synchronize_sched() returns,
3142 * each CPU is guaranteed to have executed a full memory barrier since the
3143 * end of its last RCU-sched read-side critical section whose beginning
3144 * preceded the call to synchronize_sched(). In addition, each CPU having
3145 * an RCU read-side critical section that extends beyond the return from
3146 * synchronize_sched() is guaranteed to have executed a full memory barrier
3147 * after the beginning of synchronize_sched() and before the beginning of
3148 * that RCU read-side critical section. Note that these guarantees include
3149 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3150 * that are executing in the kernel.
3151 *
3152 * Furthermore, if CPU A invoked synchronize_sched(), which returned
3153 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3154 * to have executed a full memory barrier during the execution of
3155 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
3156 * again only if the system has more than one CPU).
6ebb237b
PM
3157 *
3158 * This primitive provides the guarantees made by the (now removed)
3159 * synchronize_kernel() API. In contrast, synchronize_rcu() only
3160 * guarantees that rcu_read_lock() sections will have completed.
3161 * In "classic RCU", these two guarantees happen to be one and
3162 * the same, but can differ in realtime RCU implementations.
3163 */
3164void synchronize_sched(void)
3165{
fe15d706
PM
3166 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3167 !lock_is_held(&rcu_lock_map) &&
3168 !lock_is_held(&rcu_sched_lock_map),
3169 "Illegal synchronize_sched() in RCU-sched read-side critical section");
6ebb237b
PM
3170 if (rcu_blocking_is_gp())
3171 return;
5afff48b 3172 if (rcu_gp_is_expedited())
3705b88d
AM
3173 synchronize_sched_expedited();
3174 else
3175 wait_rcu_gp(call_rcu_sched);
6ebb237b
PM
3176}
3177EXPORT_SYMBOL_GPL(synchronize_sched);
3178
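/*
 * Illustrative usage sketch, not part of this file: a typical updater
 * publishes a new version of an RCU-protected structure and invokes
 * synchronize_sched() before freeing the old one.  The names
 * "struct foo", "foo_gp", and "foo_update_lock" are hypothetical.
 */
struct foo {
	int a;
};
static struct foo __rcu *foo_gp;
static DEFINE_SPINLOCK(foo_update_lock);

static void foo_update(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&foo_update_lock);
	oldp = rcu_dereference_protected(foo_gp,
					 lockdep_is_held(&foo_update_lock));
	rcu_assign_pointer(foo_gp, newp);
	spin_unlock(&foo_update_lock);
	synchronize_sched();	/* Wait for pre-existing readers to finish. */
	kfree(oldp);		/* No rcu_read_lock_sched() reader holds oldp. */
}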
3179/**
3180 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
3181 *
3182 * Control will return to the caller some time after a full rcu_bh grace
3183 * period has elapsed, in other words after all currently executing rcu_bh
3184 * read-side critical sections have completed. RCU read-side critical
3185 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
3186 * and may be nested.
f0a0e6f2
PM
3187 *
3188 * See the description of synchronize_sched() for more detailed information
3189 * on memory ordering guarantees.
6ebb237b
PM
3190 */
3191void synchronize_rcu_bh(void)
3192{
fe15d706
PM
3193 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3194 !lock_is_held(&rcu_lock_map) &&
3195 !lock_is_held(&rcu_sched_lock_map),
3196 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
6ebb237b
PM
3197 if (rcu_blocking_is_gp())
3198 return;
5afff48b 3199 if (rcu_gp_is_expedited())
3705b88d
AM
3200 synchronize_rcu_bh_expedited();
3201 else
3202 wait_rcu_gp(call_rcu_bh);
6ebb237b
PM
3203}
3204EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3205
765a3f4f
PM
3206/**
3207 * get_state_synchronize_rcu - Snapshot current RCU state
3208 *
3209 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3210 * to determine whether or not a full grace period has elapsed in the
3211 * meantime.
3212 */
3213unsigned long get_state_synchronize_rcu(void)
3214{
3215 /*
3216 * Any prior manipulation of RCU-protected data must happen
3217 * before the load from ->gpnum.
3218 */
3219 smp_mb(); /* ^^^ */
3220
3221 /*
3222 * Make sure this load happens before the purportedly
3223 * time-consuming work between get_state_synchronize_rcu()
3224 * and cond_synchronize_rcu().
3225 */
e534165b 3226 return smp_load_acquire(&rcu_state_p->gpnum);
765a3f4f
PM
3227}
3228EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3229
3230/**
3231 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3232 *
3233 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3234 *
3235 * If a full RCU grace period has elapsed since the earlier call to
3236 * get_state_synchronize_rcu(), just return. Otherwise, invoke
3237 * synchronize_rcu() to wait for a full grace period.
3238 *
3239 * Yes, this function does not take counter wrap into account. But
3240 * counter wrap is harmless. If the counter wraps, we have waited for
3241 * more than 2 billion grace periods (and way more on a 64-bit system!),
3242 * so waiting for one additional grace period should be just fine.
3243 */
3244void cond_synchronize_rcu(unsigned long oldstate)
3245{
3246 unsigned long newstate;
3247
3248 /*
3249 * Ensure that this load happens before any RCU-destructive
3250 * actions the caller might carry out after we return.
3251 */
e534165b 3252 newstate = smp_load_acquire(&rcu_state_p->completed);
765a3f4f
PM
3253 if (ULONG_CMP_GE(oldstate, newstate))
3254 synchronize_rcu();
3255}
3256EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3257
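/*
 * Illustrative usage sketch, not part of this file: snapshot the
 * grace-period state, do unrelated time-consuming work, then wait only
 * if a full grace period has not already elapsed in the meantime.  The
 * helper do_expensive_work() is hypothetical.
 */
static void example_deferred_gp_wait(void)
{
	unsigned long gp_snap;

	gp_snap = get_state_synchronize_rcu();
	do_expensive_work();		/* Outside any RCU read-side section. */
	cond_synchronize_rcu(gp_snap);	/* Often returns without blocking. */
}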
28f00767
PM
3258/* Adjust sequence number for start of update-side operation. */
3259static void rcu_seq_start(unsigned long *sp)
3260{
3261 WRITE_ONCE(*sp, *sp + 1);
3262 smp_mb(); /* Ensure update-side operation after counter increment. */
3263 WARN_ON_ONCE(!(*sp & 0x1));
3264}
3265
3266/* Adjust sequence number for end of update-side operation. */
3267static void rcu_seq_end(unsigned long *sp)
3268{
3269 smp_mb(); /* Ensure update-side operation before counter increment. */
3270 WRITE_ONCE(*sp, *sp + 1);
3271 WARN_ON_ONCE(*sp & 0x1);
3272}
3273
3274/* Take a snapshot of the update side's sequence number. */
3275static unsigned long rcu_seq_snap(unsigned long *sp)
3276{
3277 unsigned long s;
3278
3279 smp_mb(); /* Caller's modifications seen first by other CPUs. */
3280 s = (READ_ONCE(*sp) + 3) & ~0x1;
3281 smp_mb(); /* Above access must not bleed into critical section. */
3282 return s;
3283}
3284
3285/*
3286 * Given a snapshot from rcu_seq_snap(), determine whether or not a
3287 * full update-side operation has occurred.
3288 */
3289static bool rcu_seq_done(unsigned long *sp, unsigned long s)
3290{
3291 return ULONG_CMP_GE(READ_ONCE(*sp), s);
3292}
3293
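/*
 * Worked example of the sequence-counter arithmetic above (values
 * illustrative): if the counter reads 4 (even, no update in flight),
 * rcu_seq_snap() returns (4 + 3) & ~0x1 = 6, the value after one full
 * start()/end() pair.  If it reads 5 (odd, an update in flight), the
 * snapshot is (5 + 3) & ~0x1 = 8, skipping past the in-flight update,
 * which might have begun before the caller's changes were visible.
 * rcu_seq_done() then simply checks READ_ONCE(*sp) >= s, with
 * ULONG_CMP_GE() handling counter wrap.
 */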
3294/* Wrapper functions for expedited grace periods. */
3295static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
3296{
3297 rcu_seq_start(&rsp->expedited_sequence);
3298}
3299static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
3300{
3301 rcu_seq_end(&rsp->expedited_sequence);
3302}
3303static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
3304{
3305 return rcu_seq_snap(&rsp->expedited_sequence);
3306}
3307static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
3308{
3309 return rcu_seq_done(&rsp->expedited_sequence, s);
3310}
3311
29fd9309
PM
3312/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
3313static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
3314 atomic_long_t *stat, unsigned long s)
385b73c0 3315{
28f00767 3316 if (rcu_exp_gp_seq_done(rsp, s)) {
385b73c0
PM
3317 if (rnp)
3318 mutex_unlock(&rnp->exp_funnel_mutex);
3319 /* Ensure test happens before caller kfree(). */
3320 smp_mb__before_atomic(); /* ^^^ */
3321 atomic_long_inc(stat);
385b73c0
PM
3322 return true;
3323 }
3324 return false;
3325}
3326
b09e5f86
PM
3327/*
3328 * Funnel-lock acquisition for expedited grace periods. Returns a
3329 * pointer to the root rcu_node structure, or NULL if some other
3330 * task did the expedited grace period for us.
3331 */
3332static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
3333{
3334 struct rcu_node *rnp0;
3335 struct rcu_node *rnp1 = NULL;
3336
3337 /*
 3338 	 * Each pass through the following loop works its way up
 3339 	 * the rcu_node tree, returning early if some other task has done
 3340 	 * the work for us, and otherwise falling through holding the root rnp's
3341 * ->exp_funnel_mutex. The mapping from CPU to rcu_node structure
3342 * can be inexact, as it is just promoting locality and is not
3343 * strictly needed for correctness.
3344 */
3345 rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
3346 for (; rnp0 != NULL; rnp0 = rnp0->parent) {
29fd9309 3347 if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone1, s))
b09e5f86
PM
3348 return NULL;
3349 mutex_lock(&rnp0->exp_funnel_mutex);
3350 if (rnp1)
3351 mutex_unlock(&rnp1->exp_funnel_mutex);
3352 rnp1 = rnp0;
3353 }
29fd9309 3354 if (sync_exp_work_done(rsp, rnp1, &rsp->expedited_workdone2, s))
b09e5f86
PM
3355 return NULL;
3356 return rnp1;
3357}
3358
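/*
 * Illustrative trace of exp_funnel_lock() on a hypothetical two-level
 * tree: CPU 5's leaf A has parent root R.  Pass 1: work not yet done,
 * acquire A->exp_funnel_mutex.  Pass 2: still not done, acquire
 * R->exp_funnel_mutex, then release A's.  The loop exits holding only
 * the root's mutex, and the final sync_exp_work_done() check covers
 * any grace period that completed while waiting for that mutex.  If
 * any check finds the work already done, NULL is returned with no
 * mutex held.
 */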
3359static int synchronize_sched_expedited_cpu_stop(void *data)
3360{
3361 struct rcu_state *rsp = data;
3362
3363 /* We are here: If we are last, do the wakeup. */
3364 if (atomic_dec_and_test(&rsp->expedited_need_qs))
3365 wake_up(&rsp->expedited_wq);
3366 return 0;
3367}
3368
236fefaf
PM
3369/**
3370 * synchronize_sched_expedited - Brute-force RCU-sched grace period
3371 *
3372 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
3373 * approach to force the grace period to end quickly. This consumes
3374 * significant time on all CPUs and is unfriendly to real-time workloads,
 3375  * so it is not recommended for any sort of common-case code. In fact,
3376 * if you are using synchronize_sched_expedited() in a loop, please
3377 * restructure your code to batch your updates, and then use a single
3378 * synchronize_sched() instead.
3d3b7db0 3379 *
d6ada2cf
PM
3380 * This implementation can be thought of as an application of sequence
3381 * locking to expedited grace periods, but using the sequence counter to
3382 * determine when someone else has already done the work instead of for
385b73c0 3383 * retrying readers.
3d3b7db0
PM
3384 */
3385void synchronize_sched_expedited(void)
3386{
e0775cef 3387 int cpu;
7fd0ddc5 3388 unsigned long s;
b09e5f86 3389 struct rcu_node *rnp;
3a6d7c64 3390 struct rcu_state *rsp = &rcu_sched_state;
3d3b7db0 3391
d6ada2cf 3392 /* Take a snapshot of the sequence number. */
28f00767 3393 s = rcu_exp_gp_seq_snap(rsp);
3d3b7db0 3394
dd56af42
PM
3395 if (!try_get_online_cpus()) {
3396 /* CPU hotplug operation in flight, fall back to normal GP. */
3397 wait_rcu_gp(call_rcu_sched);
3398 atomic_long_inc(&rsp->expedited_normal);
3399 return;
3400 }
1cc85961 3401 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
3d3b7db0 3402
b09e5f86 3403 rnp = exp_funnel_lock(rsp, s);
29fd9309
PM
3404 if (rnp == NULL) {
3405 put_online_cpus();
b09e5f86 3406 return; /* Someone else did our work for us. */
29fd9309 3407 }
d6ada2cf 3408
28f00767 3409 rcu_exp_gp_seq_start(rsp);
d6ada2cf 3410
c190c3b1 3411 /* Stop each CPU that is online, non-idle, and not us. */
3a6d7c64
PZ
3412 init_waitqueue_head(&rsp->expedited_wq);
3413 atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
c190c3b1 3414 for_each_online_cpu(cpu) {
3a6d7c64 3415 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
c190c3b1
PZ
3416 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3417
3418 /* Skip our CPU and any idle CPUs. */
3419 if (raw_smp_processor_id() == cpu ||
3420 !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3421 continue;
3a6d7c64
PZ
3422 atomic_inc(&rsp->expedited_need_qs);
3423 stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
3424 rsp, &rdp->exp_stop_work);
c190c3b1 3425 }
e0775cef 3426
3a6d7c64
PZ
3427 /* Remove extra count and, if necessary, wait for CPUs to stop. */
3428 if (!atomic_dec_and_test(&rsp->expedited_need_qs))
3429 wait_event(rsp->expedited_wq,
3430 !atomic_read(&rsp->expedited_need_qs));
3431
28f00767 3432 rcu_exp_gp_seq_end(rsp);
b09e5f86 3433 mutex_unlock(&rnp->exp_funnel_mutex);
d6ada2cf 3434 smp_mb(); /* ensure subsequent action seen after grace period. */
3d3b7db0
PM
3435
3436 put_online_cpus();
3437}
3438EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
3439
64db4cff
PM
3440/*
3441 * Check to see if there is any immediate RCU-related work to be done
3442 * by the current CPU, for the specified type of RCU, returning 1 if so.
3443 * The checks are in order of increasing expense: checks that can be
3444 * carried out against CPU-local state are performed first. However,
3445 * we must check for CPU stalls first, else we might not get a chance.
3446 */
3447static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3448{
2f51f988
PM
3449 struct rcu_node *rnp = rdp->mynode;
3450
64db4cff
PM
3451 rdp->n_rcu_pending++;
3452
3453 /* Check for CPU stalls, if enabled. */
3454 check_cpu_stall(rsp, rdp);
3455
a096932f
PM
3456 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3457 if (rcu_nohz_full_cpu(rsp))
3458 return 0;
3459
64db4cff 3460 /* Is the RCU core waiting for a quiescent state from this CPU? */
5c51dd73 3461 if (rcu_scheduler_fully_active &&
5cd37193
PM
3462 rdp->qs_pending && !rdp->passed_quiesce &&
3463 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
d21670ac 3464 rdp->n_rp_qs_pending++;
5cd37193
PM
3465 } else if (rdp->qs_pending &&
3466 (rdp->passed_quiesce ||
3467 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
d21670ac 3468 rdp->n_rp_report_qs++;
64db4cff 3469 return 1;
7ba5c840 3470 }
64db4cff
PM
3471
3472 /* Does this CPU have callbacks ready to invoke? */
7ba5c840
PM
3473 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3474 rdp->n_rp_cb_ready++;
64db4cff 3475 return 1;
7ba5c840 3476 }
64db4cff
PM
3477
3478 /* Has RCU gone idle with this CPU needing another grace period? */
7ba5c840
PM
3479 if (cpu_needs_another_gp(rsp, rdp)) {
3480 rdp->n_rp_cpu_needs_gp++;
64db4cff 3481 return 1;
7ba5c840 3482 }
64db4cff
PM
3483
3484 /* Has another RCU grace period completed? */
7d0ae808 3485 if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
7ba5c840 3486 rdp->n_rp_gp_completed++;
64db4cff 3487 return 1;
7ba5c840 3488 }
64db4cff
PM
3489
3490 /* Has a new RCU grace period started? */
7d0ae808
PM
3491 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3492 unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
7ba5c840 3493 rdp->n_rp_gp_started++;
64db4cff 3494 return 1;
7ba5c840 3495 }
64db4cff 3496
96d3fd0d
PM
3497 /* Does this CPU need a deferred NOCB wakeup? */
3498 if (rcu_nocb_need_deferred_wakeup(rdp)) {
3499 rdp->n_rp_nocb_defer_wakeup++;
3500 return 1;
3501 }
3502
64db4cff 3503 /* nothing to do */
7ba5c840 3504 rdp->n_rp_need_nothing++;
64db4cff
PM
3505 return 0;
3506}
3507
3508/*
3509 * Check to see if there is any immediate RCU-related work to be done
3510 * by the current CPU, returning 1 if so. This function is part of the
3511 * RCU implementation; it is -not- an exported member of the RCU API.
3512 */
e3950ecd 3513static int rcu_pending(void)
64db4cff 3514{
6ce75a23
PM
3515 struct rcu_state *rsp;
3516
3517 for_each_rcu_flavor(rsp)
e3950ecd 3518 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
6ce75a23
PM
3519 return 1;
3520 return 0;
64db4cff
PM
3521}
3522
3523/*
c0f4dfd4
PM
3524 * Return true if the specified CPU has any callback. If all_lazy is
3525 * non-NULL, store an indication of whether all callbacks are lazy.
3526 * (If there are no callbacks, all of them are deemed to be lazy.)
64db4cff 3527 */
82072c4f 3528static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
64db4cff 3529{
c0f4dfd4
PM
3530 bool al = true;
3531 bool hc = false;
3532 struct rcu_data *rdp;
6ce75a23
PM
3533 struct rcu_state *rsp;
3534
c0f4dfd4 3535 for_each_rcu_flavor(rsp) {
aa6da514 3536 rdp = this_cpu_ptr(rsp->rda);
69c8d28c
PM
3537 if (!rdp->nxtlist)
3538 continue;
3539 hc = true;
3540 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
c0f4dfd4 3541 al = false;
69c8d28c
PM
3542 break;
3543 }
c0f4dfd4
PM
3544 }
3545 if (all_lazy)
3546 *all_lazy = al;
3547 return hc;
64db4cff
PM
3548}
3549
a83eff0a
PM
3550/*
3551 * Helper function for _rcu_barrier() tracing. If tracing is disabled,
3552 * the compiler is expected to optimize this away.
3553 */
e66c33d5 3554static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
a83eff0a
PM
3555 int cpu, unsigned long done)
3556{
3557 trace_rcu_barrier(rsp->name, s, cpu,
3558 atomic_read(&rsp->barrier_cpu_count), done);
3559}
3560
b1420f1c
PM
3561/*
3562 * RCU callback function for _rcu_barrier(). If we are last, wake
3563 * up the task executing _rcu_barrier().
3564 */
24ebbca8 3565static void rcu_barrier_callback(struct rcu_head *rhp)
d0ec774c 3566{
24ebbca8
PM
3567 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3568 struct rcu_state *rsp = rdp->rsp;
3569
a83eff0a
PM
3570 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3571 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
7db74df8 3572 complete(&rsp->barrier_completion);
a83eff0a
PM
3573 } else {
3574 _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
3575 }
d0ec774c
PM
3576}
3577
3578/*
3579 * Called with preemption disabled, and from cross-cpu IRQ context.
3580 */
3581static void rcu_barrier_func(void *type)
3582{
037b64ed 3583 struct rcu_state *rsp = type;
fa07a58f 3584 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
d0ec774c 3585
a83eff0a 3586 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
24ebbca8 3587 atomic_inc(&rsp->barrier_cpu_count);
06668efa 3588 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
d0ec774c
PM
3589}
3590
d0ec774c
PM
3591/*
3592 * Orchestrate the specified type of RCU barrier, waiting for all
3593 * RCU callbacks of the specified type to complete.
3594 */
037b64ed 3595static void _rcu_barrier(struct rcu_state *rsp)
d0ec774c 3596{
b1420f1c 3597 int cpu;
b1420f1c 3598 struct rcu_data *rdp;
7d0ae808 3599 unsigned long snap = READ_ONCE(rsp->n_barrier_done);
cf3a9c48 3600 unsigned long snap_done;
b1420f1c 3601
a83eff0a 3602 _rcu_barrier_trace(rsp, "Begin", -1, snap);
b1420f1c 3603
e74f4c45 3604 /* Take mutex to serialize concurrent rcu_barrier() requests. */
7be7f0be 3605 mutex_lock(&rsp->barrier_mutex);
b1420f1c 3606
cf3a9c48
PM
3607 /*
3608 * Ensure that all prior references, including to ->n_barrier_done,
3609 * are ordered before the _rcu_barrier() machinery.
3610 */
3611 smp_mb(); /* See above block comment. */
3612
3613 /*
3614 * Recheck ->n_barrier_done to see if others did our work for us.
3615 * This means checking ->n_barrier_done for an even-to-odd-to-even
3616 * transition. The "if" expression below therefore rounds the old
3617 * value up to the next even number and adds two before comparing.
3618 */
458fb381 3619 snap_done = rsp->n_barrier_done;
a83eff0a 3620 _rcu_barrier_trace(rsp, "Check", -1, snap_done);
458fb381
PM
3621
3622 /*
3623 * If the value in snap is odd, we needed to wait for the current
3624 * rcu_barrier() to complete, then wait for the next one, in other
3625 * words, we need the value of snap_done to be three larger than
3626 * the value of snap. On the other hand, if the value in snap is
3627 * even, we only had to wait for the next rcu_barrier() to complete,
3628 * in other words, we need the value of snap_done to be only two
3629 * greater than the value of snap. The "(snap + 3) & ~0x1" computes
3630 * this for us (thank you, Linus!).
3631 */
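	/*
	 * For example (illustrative values): snap == 4 (even) requires
	 * snap_done >= 6, while snap == 5 (odd) requires snap_done >= 8;
	 * and indeed (4 + 3) & ~0x1 == 6 and (5 + 3) & ~0x1 == 8.
	 */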
3632 if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
a83eff0a 3633 _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
cf3a9c48
PM
3634 smp_mb(); /* caller's subsequent code after above check. */
3635 mutex_unlock(&rsp->barrier_mutex);
3636 return;
3637 }
3638
3639 /*
3640 * Increment ->n_barrier_done to avoid duplicate work. Use
7d0ae808 3641 * WRITE_ONCE() to prevent the compiler from speculating
cf3a9c48
PM
3642 * the increment to precede the early-exit check.
3643 */
7d0ae808 3644 WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
cf3a9c48 3645 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
a83eff0a 3646 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
cf3a9c48 3647 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
b1420f1c 3648
d0ec774c 3649 /*
b1420f1c
PM
3650 * Initialize the count to one rather than to zero in order to
3651 * avoid a too-soon return to zero in case of a short grace period
1331e7a1
PM
3652 * (or preemption of this task). Exclude CPU-hotplug operations
3653 * to ensure that no offline CPU has callbacks queued.
d0ec774c 3654 */
7db74df8 3655 init_completion(&rsp->barrier_completion);
24ebbca8 3656 atomic_set(&rsp->barrier_cpu_count, 1);
1331e7a1 3657 get_online_cpus();
b1420f1c
PM
3658
3659 /*
1331e7a1
PM
3660 * Force each CPU with callbacks to register a new callback.
3661 * When that callback is invoked, we will know that all of the
3662 * corresponding CPU's preceding callbacks have been invoked.
b1420f1c 3663 */
3fbfbf7a 3664 for_each_possible_cpu(cpu) {
d1e43fa5 3665 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3fbfbf7a 3666 continue;
b1420f1c 3667 rdp = per_cpu_ptr(rsp->rda, cpu);
d1e43fa5 3668 if (rcu_is_nocb_cpu(cpu)) {
d7e29933
PM
3669 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3670 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3671 rsp->n_barrier_done);
3672 } else {
3673 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3674 rsp->n_barrier_done);
41050a00 3675 smp_mb__before_atomic();
d7e29933
PM
3676 atomic_inc(&rsp->barrier_cpu_count);
3677 __call_rcu(&rdp->barrier_head,
3678 rcu_barrier_callback, rsp, cpu, 0);
3679 }
7d0ae808 3680 } else if (READ_ONCE(rdp->qlen)) {
a83eff0a
PM
3681 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
3682 rsp->n_barrier_done);
037b64ed 3683 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
b1420f1c 3684 } else {
a83eff0a
PM
3685 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3686 rsp->n_barrier_done);
b1420f1c
PM
3687 }
3688 }
1331e7a1 3689 put_online_cpus();
b1420f1c
PM
3690
3691 /*
3692 * Now that we have an rcu_barrier_callback() callback on each
3693 * CPU, and thus each counted, remove the initial count.
3694 */
24ebbca8 3695 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
7db74df8 3696 complete(&rsp->barrier_completion);
b1420f1c 3697
cf3a9c48
PM
3698 /* Increment ->n_barrier_done to prevent duplicate work. */
3699 smp_mb(); /* Keep increment after above mechanism. */
7d0ae808 3700 WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
cf3a9c48 3701 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
a83eff0a 3702 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
cf3a9c48
PM
3703 smp_mb(); /* Keep increment before caller's subsequent code. */
3704
b1420f1c 3705 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
7db74df8 3706 wait_for_completion(&rsp->barrier_completion);
b1420f1c
PM
3707
3708 /* Other rcu_barrier() invocations can now safely proceed. */
7be7f0be 3709 mutex_unlock(&rsp->barrier_mutex);
d0ec774c 3710}
d0ec774c
PM
3711
3712/**
3713 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3714 */
3715void rcu_barrier_bh(void)
3716{
037b64ed 3717 _rcu_barrier(&rcu_bh_state);
d0ec774c
PM
3718}
3719EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3720
3721/**
3722 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
3723 */
3724void rcu_barrier_sched(void)
3725{
037b64ed 3726 _rcu_barrier(&rcu_sched_state);
d0ec774c
PM
3727}
3728EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3729
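/*
 * Illustrative usage sketch, not part of this file: a module that
 * posts callbacks with call_rcu_sched() must wait for all of them to
 * be invoked before its callback functions can be unloaded.  The
 * helper example_stop_new_callbacks() is hypothetical.
 */
static void __exit example_module_exit(void)
{
	example_stop_new_callbacks();	/* Stop posting new callbacks. */
	rcu_barrier_sched();		/* Wait for posted callbacks to run. */
	/* Only now is it safe for the module text to go away. */
}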
0aa04b05
PM
3730/*
 3731  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3732 * first CPU in a given leaf rcu_node structure coming online. The caller
 3733  * must hold the corresponding leaf rcu_node ->lock with interrupts
3734 * disabled.
3735 */
3736static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3737{
3738 long mask;
3739 struct rcu_node *rnp = rnp_leaf;
3740
3741 for (;;) {
3742 mask = rnp->grpmask;
3743 rnp = rnp->parent;
3744 if (rnp == NULL)
3745 return;
3746 raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
3747 rnp->qsmaskinit |= mask;
3748 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
3749 }
3750}
3751
64db4cff 3752/*
27569620 3753 * Do boot-time initialization of a CPU's per-CPU RCU data.
64db4cff 3754 */
27569620
PM
3755static void __init
3756rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cff
PM
3757{
3758 unsigned long flags;
394f99a9 3759 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
27569620
PM
3760 struct rcu_node *rnp = rcu_get_root(rsp);
3761
3762 /* Set up local state, ensuring consistent view of global state. */
1304afb2 3763 raw_spin_lock_irqsave(&rnp->lock, flags);
27569620 3764 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
27569620 3765 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
29e37d81 3766 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
9b2e4f18 3767 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
27569620 3768 rdp->cpu = cpu;
d4c08f2a 3769 rdp->rsp = rsp;
3fbfbf7a 3770 rcu_boot_init_nocb_percpu_data(rdp);
1304afb2 3771 raw_spin_unlock_irqrestore(&rnp->lock, flags);
27569620
PM
3772}
3773
3774/*
3775 * Initialize a CPU's per-CPU RCU data. Note that only one online or
3776 * offline event can be happening at a given time. Note also that we
3777 * can accept some slop in the rsp->completed access due to the fact
3778 * that this CPU cannot possibly have any RCU callbacks in flight yet.
64db4cff 3779 */
49fb4c62 3780static void
9b67122a 3781rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cff
PM
3782{
3783 unsigned long flags;
64db4cff 3784 unsigned long mask;
394f99a9 3785 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
64db4cff
PM
3786 struct rcu_node *rnp = rcu_get_root(rsp);
3787
3788 /* Set up local state, ensuring consistent view of global state. */
1304afb2 3789 raw_spin_lock_irqsave(&rnp->lock, flags);
64db4cff 3790 rdp->beenonline = 1; /* We have now been online. */
37c72e56
PM
3791 rdp->qlen_last_fqs_check = 0;
3792 rdp->n_force_qs_snap = rsp->n_force_qs;
64db4cff 3793 rdp->blimit = blimit;
39c8d313
PM
3794 if (!rdp->nxtlist)
3795 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
29e37d81 3796 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2333210b 3797 rcu_sysidle_init_percpu_data(rdp->dynticks);
c92b131b
PM
3798 atomic_set(&rdp->dynticks->dynticks,
3799 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
1304afb2 3800 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
64db4cff 3801
0aa04b05
PM
3802 /*
3803 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
3804 * propagation up the rcu_node tree will happen at the beginning
3805 * of the next grace period.
3806 */
64db4cff
PM
3807 rnp = rdp->mynode;
3808 mask = rdp->grpmask;
0aa04b05
PM
3809 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
3810 smp_mb__after_unlock_lock();
3811 rnp->qsmaskinitnext |= mask;
3812 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
3813 rdp->completed = rnp->completed;
3814 rdp->passed_quiesce = false;
a738eec6 3815 rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
0aa04b05
PM
3816 rdp->qs_pending = false;
3817 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3818 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
3819}
3820
49fb4c62 3821static void rcu_prepare_cpu(int cpu)
64db4cff 3822{
6ce75a23
PM
3823 struct rcu_state *rsp;
3824
3825 for_each_rcu_flavor(rsp)
9b67122a 3826 rcu_init_percpu_data(cpu, rsp);
64db4cff
PM
3827}
3828
3829/*
f41d911f 3830 * Handle CPU online/offline notification events.
64db4cff 3831 */
88428cc5
PM
3832int rcu_cpu_notify(struct notifier_block *self,
3833 unsigned long action, void *hcpu)
64db4cff
PM
3834{
3835 long cpu = (long)hcpu;
e534165b 3836 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
a26ac245 3837 struct rcu_node *rnp = rdp->mynode;
6ce75a23 3838 struct rcu_state *rsp;
64db4cff
PM
3839
3840 switch (action) {
3841 case CPU_UP_PREPARE:
3842 case CPU_UP_PREPARE_FROZEN:
d72bce0e
PZ
3843 rcu_prepare_cpu(cpu);
3844 rcu_prepare_kthreads(cpu);
35ce7f29 3845 rcu_spawn_all_nocb_kthreads(cpu);
a26ac245
PM
3846 break;
3847 case CPU_ONLINE:
0f962a5e 3848 case CPU_DOWN_FAILED:
5d01bbd1 3849 rcu_boost_kthread_setaffinity(rnp, -1);
0f962a5e
PM
3850 break;
3851 case CPU_DOWN_PREPARE:
34ed6246 3852 rcu_boost_kthread_setaffinity(rnp, cpu);
64db4cff 3853 break;
d0ec774c
PM
3854 case CPU_DYING:
3855 case CPU_DYING_FROZEN:
6ce75a23
PM
3856 for_each_rcu_flavor(rsp)
3857 rcu_cleanup_dying_cpu(rsp);
d0ec774c 3858 break;
88428cc5
PM
3859 case CPU_DYING_IDLE:
3860 for_each_rcu_flavor(rsp) {
3861 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3862 }
3863 break;
64db4cff
PM
3864 case CPU_DEAD:
3865 case CPU_DEAD_FROZEN:
3866 case CPU_UP_CANCELED:
3867 case CPU_UP_CANCELED_FROZEN:
776d6807 3868 for_each_rcu_flavor(rsp) {
6ce75a23 3869 rcu_cleanup_dead_cpu(cpu, rsp);
776d6807
PM
3870 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3871 }
64db4cff
PM
3872 break;
3873 default:
3874 break;
3875 }
34ed6246 3876 return NOTIFY_OK;
64db4cff
PM
3877}
3878
d1d74d14
BP
3879static int rcu_pm_notify(struct notifier_block *self,
3880 unsigned long action, void *hcpu)
3881{
3882 switch (action) {
3883 case PM_HIBERNATION_PREPARE:
3884 case PM_SUSPEND_PREPARE:
3885 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
5afff48b 3886 rcu_expedite_gp();
d1d74d14
BP
3887 break;
3888 case PM_POST_HIBERNATION:
3889 case PM_POST_SUSPEND:
5afff48b
PM
3890 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3891 rcu_unexpedite_gp();
d1d74d14
BP
3892 break;
3893 default:
3894 break;
3895 }
3896 return NOTIFY_OK;
3897}
3898
b3dbec76 3899/*
9386c0b7 3900 * Spawn the kthreads that handle each RCU flavor's grace periods.
b3dbec76
PM
3901 */
3902static int __init rcu_spawn_gp_kthread(void)
3903{
3904 unsigned long flags;
a94844b2 3905 int kthread_prio_in = kthread_prio;
b3dbec76
PM
3906 struct rcu_node *rnp;
3907 struct rcu_state *rsp;
a94844b2 3908 struct sched_param sp;
b3dbec76
PM
3909 struct task_struct *t;
3910
a94844b2
PM
3911 /* Force priority into range. */
3912 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3913 kthread_prio = 1;
3914 else if (kthread_prio < 0)
3915 kthread_prio = 0;
3916 else if (kthread_prio > 99)
3917 kthread_prio = 99;
3918 if (kthread_prio != kthread_prio_in)
3919 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3920 kthread_prio, kthread_prio_in);
3921
9386c0b7 3922 rcu_scheduler_fully_active = 1;
b3dbec76 3923 for_each_rcu_flavor(rsp) {
a94844b2 3924 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
b3dbec76
PM
3925 BUG_ON(IS_ERR(t));
3926 rnp = rcu_get_root(rsp);
3927 raw_spin_lock_irqsave(&rnp->lock, flags);
3928 rsp->gp_kthread = t;
a94844b2
PM
3929 if (kthread_prio) {
3930 sp.sched_priority = kthread_prio;
3931 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3932 }
3933 wake_up_process(t);
b3dbec76
PM
3934 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3935 }
35ce7f29 3936 rcu_spawn_nocb_kthreads();
9386c0b7 3937 rcu_spawn_boost_kthreads();
b3dbec76
PM
3938 return 0;
3939}
3940early_initcall(rcu_spawn_gp_kthread);
3941
bbad9379
PM
3942/*
3943 * This function is invoked towards the end of the scheduler's initialization
3944 * process. Before this is called, the idle task might contain
3945 * RCU read-side critical sections (during which time, this idle
3946 * task is booting the system). After this function is called, the
3947 * idle tasks are prohibited from containing RCU read-side critical
3948 * sections. This function also enables RCU lockdep checking.
3949 */
3950void rcu_scheduler_starting(void)
3951{
3952 WARN_ON(num_online_cpus() != 1);
3953 WARN_ON(nr_context_switches() > 0);
3954 rcu_scheduler_active = 1;
3955}
3956
64db4cff
PM
3957/*
3958 * Compute the per-level fanout, either using the exact fanout specified
7fa27001 3959 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
64db4cff 3960 */
199977bf 3961static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
64db4cff 3962{
64db4cff
PM
3963 int i;
3964
7fa27001 3965 if (rcu_fanout_exact) {
199977bf 3966 levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
66292405 3967 for (i = rcu_num_lvls - 2; i >= 0; i--)
199977bf 3968 levelspread[i] = RCU_FANOUT;
66292405
PM
3969 } else {
3970 int ccur;
3971 int cprv;
3972
3973 cprv = nr_cpu_ids;
3974 for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bf
AG
3975 ccur = levelcnt[i];
3976 levelspread[i] = (cprv + ccur - 1) / ccur;
66292405
PM
3977 cprv = ccur;
3978 }
64db4cff
PM
3979 }
3980}
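/*
 * Worked example (illustrative): with nr_cpu_ids = 96, rcu_num_lvls = 2,
 * and levelcnt = { 1, 6 }, the balancing branch above yields
 * levelspread[1] = (96 + 6 - 1) / 6 = 16 CPUs per leaf rcu_node and
 * levelspread[0] = (6 + 1 - 1) / 1 = 6 leaves under the root.
 */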
64db4cff
PM
3981
3982/*
3983 * Helper function for rcu_init() that initializes one rcu_state structure.
3984 */
394f99a9
LJ
3985static void __init rcu_init_one(struct rcu_state *rsp,
3986 struct rcu_data __percpu *rda)
64db4cff 3987{
cb007102
AG
3988 static const char * const buf[] = RCU_NODE_NAME_INIT;
3989 static const char * const fqs[] = RCU_FQS_NAME_INIT;
385b73c0 3990 static const char * const exp[] = RCU_EXP_NAME_INIT;
4a81e832 3991 static u8 fl_mask = 0x1;
199977bf
AG
3992
3993 int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */
3994 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
64db4cff
PM
3995 int cpustride = 1;
3996 int i;
3997 int j;
3998 struct rcu_node *rnp;
3999
05b84aec 4000 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
b6407e86 4001
3eaaaf6c
PM
4002 /* Silence gcc 4.8 false positive about array index out of range. */
4003 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4004 panic("rcu_init_one: rcu_num_lvls out of range");
4930521a 4005
64db4cff
PM
4006 /* Initialize the level-tracking arrays. */
4007
f885b7f2 4008 for (i = 0; i < rcu_num_lvls; i++)
199977bf 4009 levelcnt[i] = num_rcu_lvl[i];
f885b7f2 4010 for (i = 1; i < rcu_num_lvls; i++)
199977bf
AG
4011 rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
4012 rcu_init_levelspread(levelspread, levelcnt);
4a81e832
PM
4013 rsp->flavor_mask = fl_mask;
4014 fl_mask <<= 1;
64db4cff
PM
4015
4016 /* Initialize the elements themselves, starting from the leaves. */
4017
f885b7f2 4018 for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bf 4019 cpustride *= levelspread[i];
64db4cff 4020 rnp = rsp->level[i];
199977bf 4021 for (j = 0; j < levelcnt[i]; j++, rnp++) {
1304afb2 4022 raw_spin_lock_init(&rnp->lock);
b6407e86
PM
4023 lockdep_set_class_and_name(&rnp->lock,
4024 &rcu_node_class[i], buf[i]);
394f2769
PM
4025 raw_spin_lock_init(&rnp->fqslock);
4026 lockdep_set_class_and_name(&rnp->fqslock,
4027 &rcu_fqs_class[i], fqs[i]);
25d30cf4
PM
4028 rnp->gpnum = rsp->gpnum;
4029 rnp->completed = rsp->completed;
64db4cff
PM
4030 rnp->qsmask = 0;
4031 rnp->qsmaskinit = 0;
4032 rnp->grplo = j * cpustride;
4033 rnp->grphi = (j + 1) * cpustride - 1;
595f3900
HS
4034 if (rnp->grphi >= nr_cpu_ids)
4035 rnp->grphi = nr_cpu_ids - 1;
64db4cff
PM
4036 if (i == 0) {
4037 rnp->grpnum = 0;
4038 rnp->grpmask = 0;
4039 rnp->parent = NULL;
4040 } else {
199977bf 4041 rnp->grpnum = j % levelspread[i - 1];
64db4cff
PM
4042 rnp->grpmask = 1UL << rnp->grpnum;
4043 rnp->parent = rsp->level[i - 1] +
199977bf 4044 j / levelspread[i - 1];
64db4cff
PM
4045 }
4046 rnp->level = i;
12f5f524 4047 INIT_LIST_HEAD(&rnp->blkd_tasks);
dae6e64d 4048 rcu_init_one_nocb(rnp);
385b73c0
PM
4049 mutex_init(&rnp->exp_funnel_mutex);
4050 lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
4051 &rcu_exp_class[i], exp[i]);
64db4cff
PM
4052 }
4053 }
0c34029a 4054
b3dbec76 4055 init_waitqueue_head(&rsp->gp_wq);
f885b7f2 4056 rnp = rsp->level[rcu_num_lvls - 1];
0c34029a 4057 for_each_possible_cpu(i) {
4a90a068 4058 while (i > rnp->grphi)
0c34029a 4059 rnp++;
394f99a9 4060 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
0c34029a
LJ
4061 rcu_boot_init_percpu_data(i, rsp);
4062 }
6ce75a23 4063 list_add(&rsp->flavors, &rcu_struct_flavors);
64db4cff
PM
4064}
4065
f885b7f2
PM
4066/*
4067 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4102adab 4068 * replace the definitions in tree.h because those are needed to size
f885b7f2
PM
4069 * the ->node array in the rcu_state structure.
4070 */
4071static void __init rcu_init_geometry(void)
4072{
026ad283 4073 ulong d;
f885b7f2 4074 int i;
05b84aec 4075 int rcu_capacity[RCU_NUM_LVLS];
f885b7f2 4076
026ad283
PM
4077 /*
4078 * Initialize any unspecified boot parameters.
4079 * The default values of jiffies_till_first_fqs and
4080 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
 4081 	 * value, which is a function of HZ, plus one for each
4082 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4083 */
4084 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4085 if (jiffies_till_first_fqs == ULONG_MAX)
4086 jiffies_till_first_fqs = d;
4087 if (jiffies_till_next_fqs == ULONG_MAX)
4088 jiffies_till_next_fqs = d;
4089
f885b7f2 4090 /* If the compile-time values are accurate, just leave. */
47d631af 4091 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
b17c7035 4092 nr_cpu_ids == NR_CPUS)
f885b7f2 4093 return;
39479098
PM
4094 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
4095 rcu_fanout_leaf, nr_cpu_ids);
f885b7f2 4096
75cf15a4
AG
4097 /*
4098 * The boot-time rcu_fanout_leaf parameter is only permitted
4099 * to increase the leaf-level fanout, not decrease it. Of course,
4100 * the leaf-level fanout cannot exceed the number of bits in
4101 * the rcu_node masks. Complain and fall back to the compile-
4102 * time values if these limits are exceeded.
4103 */
4104 if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
4105 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
13bd6494 4106 rcu_fanout_leaf = RCU_FANOUT_LEAF;
75cf15a4
AG
4107 WARN_ON(1);
4108 return;
4109 }
4110
f885b7f2
PM
4111 /*
 4112 	 * Compute the number of nodes that can be handled by an rcu_node tree
9618138b 4113 * with the given number of levels.
f885b7f2 4114 */
9618138b 4115 rcu_capacity[0] = rcu_fanout_leaf;
05b84aec 4116 for (i = 1; i < RCU_NUM_LVLS; i++)
05c5df31 4117 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
f885b7f2
PM
4118
4119 /*
75cf15a4
AG
4120 * The tree must be able to accommodate the configured number of CPUs.
 4121 	 * If this limit is exceeded, then we have a serious problem elsewhere.
f885b7f2 4122 */
05b84aec 4123 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1])
75cf15a4 4124 panic("rcu_init_geometry: rcu_capacity[] is too small");
f885b7f2 4125
679f9858 4126 /* Calculate the number of levels in the tree. */
9618138b 4127 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
679f9858 4128 }
9618138b 4129 rcu_num_lvls = i + 1;
679f9858 4130
f885b7f2 4131 /* Calculate the number of rcu_nodes at each level of the tree. */
679f9858 4132 for (i = 0; i < rcu_num_lvls; i++) {
9618138b 4133 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
679f9858
AG
4134 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4135 }
f885b7f2
PM
4136
4137 /* Calculate the total number of rcu_node structures. */
4138 rcu_num_nodes = 0;
679f9858 4139 for (i = 0; i < rcu_num_lvls; i++)
f885b7f2 4140 rcu_num_nodes += num_rcu_lvl[i];
f885b7f2
PM
4141}
4142
a3dc2948
PM
4143/*
4144 * Dump out the structure of the rcu_node combining tree associated
4145 * with the rcu_state structure referenced by rsp.
4146 */
4147static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4148{
4149 int level = 0;
4150 struct rcu_node *rnp;
4151
4152 pr_info("rcu_node tree layout dump\n");
4153 pr_info(" ");
4154 rcu_for_each_node_breadth_first(rsp, rnp) {
4155 if (rnp->level != level) {
4156 pr_cont("\n");
4157 pr_info(" ");
4158 level = rnp->level;
4159 }
4160 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4161 }
4162 pr_cont("\n");
4163}
4164
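/*
 * Illustrative output (hypothetical 96-CPU system with two levels and
 * 16-CPU leaves):
 *
 *	rcu_node tree layout dump
 *	 0:95 ^0
 *	 0:15 ^0 16:31 ^1 32:47 ^2 48:63 ^3 64:79 ^4 80:95 ^5
 */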
9f680ab4 4165void __init rcu_init(void)
64db4cff 4166{
017c4261 4167 int cpu;
9f680ab4 4168
47627678
PM
4169 rcu_early_boot_tests();
4170
f41d911f 4171 rcu_bootup_announce();
f885b7f2 4172 rcu_init_geometry();
394f99a9 4173 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
69c8d28c 4174 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
a3dc2948
PM
4175 if (dump_tree)
4176 rcu_dump_rcu_node_tree(&rcu_sched_state);
f41d911f 4177 __rcu_init_preempt();
b5b39360 4178 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
9f680ab4
PM
4179
4180 /*
4181 * We don't need protection against CPU-hotplug here because
4182 * this is called early in boot, before either interrupts
4183 * or the scheduler are operational.
4184 */
4185 cpu_notifier(rcu_cpu_notify, 0);
d1d74d14 4186 pm_notifier(rcu_pm_notify, 0);
017c4261
PM
4187 for_each_online_cpu(cpu)
4188 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
64db4cff
PM
4189}
4190
4102adab 4191#include "tree_plugin.h"