/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>

#include "tree.h"
#include "rcu.h"

MODULE_ALIAS("rcutree");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the userspace
 * tracing tools to map the string address back to the matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
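/*
 * Illustrative sketch (added commentary, not part of the original source):
 * for the rcu_sched flavor, the RCU_STATE_INITIALIZER() invocation above
 * roughly expands to the following, with RCU_STATE_NAME() resolving to the
 * tracepoint string when CONFIG_TRACING is set:
 *
 *	static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_sched_data);
 *	struct rcu_state rcu_sched_state = {
 *		.level = { &rcu_sched_state.node[0] },
 *		.rda = &rcu_sched_data,
 *		.call = call_rcu_sched,
 *		.gp_state = RCU_GP_IDLE,
 *		.gpnum = 0UL - 300UL,
 *		.completed = 0UL - 300UL,
 *		...
 *		.abbr = 's',
 *	};
 */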

static struct rcu_state *const rcu_state_p;
static struct rcu_data __percpu *const rcu_data_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);

/* rcuc/rcub kthread realtime priority */
#ifdef CONFIG_RCU_KTHREAD_PRIO
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
module_param(gp_preinit_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
static const int gp_preinit_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
static const int gp_init_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
module_param(gp_cleanup_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
static const int gp_cleanup_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more grace periods between each
 * delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Record a quiescent state.  Because we do not need to know
 * how many quiescent states have passed, only whether there has been
 * at least one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}
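/*
 * Descriptive note (added commentary): rcu_sched_qs() is invoked from
 * scheduling-related paths such as rcu_note_context_switch() below, while
 * rcu_bh_qs() is invoked from the softirq-processing code once a batch of
 * softirq handlers has run.  Both merely record "this CPU saw a quiescent
 * state" for the grace-period machinery to pick up later.
 */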

static DEFINE_PER_CPU(int, rcu_sched_qs_mask);

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period, which we in turn do by incrementing the ->dynticks counter
 * by two.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;
	int resched_mask;
	struct rcu_state *rsp;

	/*
	 * Yes, we can lose flag-setting operations.  This is OK, because
	 * the flag will be set again after some delay.
	 */
	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
	raw_cpu_write(rcu_sched_qs_mask, 0);

	/* Find the flavor that needs a quiescent state. */
	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (!(resched_mask & rsp->flavor_mask))
			continue;
		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
		if (READ_ONCE(rdp->mynode->completed) !=
		    READ_ONCE(rdp->cond_resched_completed))
			continue;

		/*
		 * Pretend to be momentarily idle for the quiescent state.
		 * This allows the grace-period kthread to record the
		 * quiescent state, with no need for this CPU to do anything
		 * further.
		 */
		rdtp = this_cpu_ptr(&rcu_dynticks);
		smp_mb__before_atomic(); /* Earlier stuff before QS. */
		atomic_add(2, &rdtp->dynticks);  /* QS. */
		smp_mb__after_atomic(); /* Later stuff after QS. */
		break;
	}
}
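/*
 * Illustrative sketch (added commentary, not kernel code): the ->dynticks
 * counter is even while the CPU is in an extended quiescent state and odd
 * otherwise, so a later observer holding an earlier snapshot can reason:
 *
 *	snap = atomic_add_return(0, &rdtp->dynticks);	// earlier
 *	...
 *	curr = atomic_add_return(0, &rdtp->dynticks);	// later
 *	if (!(curr & 0x1) || curr - snap >= 2)
 *		;	// CPU was, or has been, idle: counts as a QS.
 *
 * Adding two above therefore preserves the "non-idle" parity while still
 * signalling that this CPU passed through a quiescent state.
 */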

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but they are needed just in case this is called
 * from within this file.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	this_cpu_inc(rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
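/*
 * Usage note (added commentary, hypothetical example): a long-running
 * in-kernel loop that is known to hold no RCU read-side critical section
 * at a given point might periodically do something like the following to
 * keep grace periods moving; process_item() and huge_n are illustrative
 * names only:
 *
 *	for (i = 0; i < huge_n; i++) {
 *		process_item(i);		// no RCU readers held here
 *		if (!(i % 1024))
 *			rcu_all_qs();
 *	}
 */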

static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000;	/* If this many pending, ignore blimit. */
static long qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
module_param(jiffies_till_sched_qs, ulong, 0644);
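/*
 * Usage note (added commentary): because MODULE_PARAM_PREFIX is defined as
 * "rcutree." earlier in this file, these knobs appear under that prefix,
 * so the kernel can for example be booted with something like:
 *
 *	rcutree.jiffies_till_sched_qs=50 rcutree.jiffies_till_first_fqs=3
 *
 * (the values shown are purely illustrative).
 */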

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp != NULL) {
		*flags = READ_ONCE(rsp->gp_flags);
		*gpnum = READ_ONCE(rsp->gpnum);
		*completed = READ_ONCE(rsp->completed);
		return;
	}
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	return READ_ONCE(*fp);
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	int i;

	if (rcu_gp_in_progress(rsp))
		return false;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return true;  /* Yes, a no-CBs CPU needs one. */
	if (!rdp->nxttail[RCU_NEXT_TAIL])
		return false;  /* No, this is a no-CBs (or offline) CPU. */
	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
		return true;  /* Yes, CPU has newly registered callbacks. */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
				 rdp->nxtcompleted[i]))
			return true;  /* Yes, CBs for future grace period. */
	return false; /* No grace period needed. */
}

/*
 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_enter_common(long long oldval, bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic(); /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic(); /* Force ordering with next sojourn. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     atomic_read(&rdtp->dynticks) & 0x1);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal idle entry in RCU read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
			 "Illegal idle entry in RCU-bh read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
			 "Illegal idle entry in RCU-sched read-side critical section.");
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
		rdtp->dynticks_nesting = 0;
		rcu_eqs_enter_common(oldval, user);
	} else {
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	}
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	rcu_sysidle_enter(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
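/*
 * Usage note (added commentary): conceptually, the architecture-independent
 * idle loop brackets its low-power wait with these calls, roughly:
 *
 *	rcu_idle_enter();
 *	arch_cpu_idle();	// CPU may now sleep; RCU is not watching.
 *	rcu_idle_exit();
 *
 * Any RCU usage between the two calls must be wrapped in RCU_NONIDLE() or
 * occur inside the rcu_irq_enter()/rcu_irq_exit() pair.
 */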

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(1);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_enter_common(oldval, true);
	rcu_sysidle_enter(1);
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	rcu_dynticks_task_exit();
	smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic(); /* See above. */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	rcu_sysidle_exit(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section at any time.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_exit_common(oldval, true);
	rcu_sysidle_exit(1);
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
		smp_mb__before_atomic();  /* Force delay from prior write. */
		atomic_inc(&rdtp->dynticks);
		/* atomic_inc() before later RCU read-side crit sects */
		smp_mb__after_atomic();  /* See above. */
		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
		incby = 1;
	}
	rdtp->dynticks_nmi_nesting += incby;
	barrier();
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		rdtp->dynticks_nmi_nesting -= 2;
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	rdtp->dynticks_nmi_nesting = 0;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
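/*
 * Worked example (added commentary): if an NMI arrives while the CPU is
 * RCU-idle and a second NMI nests inside it, the bookkeeping above goes:
 *
 *	outer rcu_nmi_enter():  ->dynticks becomes odd, nmi_nesting = 1
 *	inner rcu_nmi_enter():  nmi_nesting = 3
 *	inner rcu_nmi_exit():   nmi_nesting = 1
 *	outer rcu_nmi_exit():   nmi_nesting = 0, ->dynticks becomes even again
 *
 * so only the outermost exit returns the CPU to its RCU-idle state.
 */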

/**
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  Unlike
 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 * least disabled preemption.
 */
bool notrace __rcu_is_watching(void)
{
	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true unless the current CPU is in its idle loop and is neither
 * in an interrupt nor in an NMI handler, in which case RCU is not
 * watching and read-side critical sections are unsafe.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = __rcu_is_watching();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
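/*
 * Usage note (added commentary, hypothetical caller): code that might run
 * from the idle loop or other RCU-idle contexts can guard its RCU usage
 * with this predicate, for example:
 *
 *	if (rcu_is_watching())
 *		call_rcu(&p->rh, some_callback);	// safe: RCU is active
 *
 * Debug checks such as RCU_LOCKDEP_WARN(!rcu_is_watching(), ...) rely on
 * the same test; "p->rh" and "some_callback" above are illustrative names.
 */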

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	if ((rdp->dynticks_snap & 0x1) == 0) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		return 1;
	} else {
		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
				 rdp->mynode->gpnum))
			WRITE_ONCE(rdp->gpwrap, true);
		return 0;
	}
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks-idle
 * state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
				    bool *isidle, unsigned long *maxj)
{
	unsigned int curr;
	int *rcrmp;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/*
	 * Check for the CPU being offline, but only if the grace period
	 * is old enough.  We don't need to worry about the CPU changing
	 * state: If we see it offline even once, it has been through a
	 * quiescent state.
	 *
	 * The reason for insisting that the grace period be at least
	 * one jiffy old is that CPUs that are not quite online and that
	 * have just gone offline can still execute RCU read-side critical
	 * sections.
	 */
	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
		return 0;  /* Grace period is not old enough. */
	barrier();
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.  Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
	 *
	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
	 * is set too high, we override with half of the RCU CPU stall
	 * warning delay.
	 */
	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
	if (ULONG_CMP_GE(jiffies,
			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
			WRITE_ONCE(rdp->cond_resched_completed,
				   READ_ONCE(rdp->mynode->completed));
			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
			WRITE_ONCE(*rcrmp,
				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
			/* Time to beat on that CPU again! */
			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
		}
	}

	return 0;
}

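/*
 * Worked example (added commentary): suppose the force-quiescent-state scan
 * snapshotted ->dynticks as 8 (even: the CPU was already idle), or as 9 and
 * later reads 11 (advanced by at least two: the CPU passed through idle or
 * userspace in the meantime).  In both cases rcu_implicit_dynticks_qs()
 * above reports a quiescent state without ever interrupting that CPU; only
 * a CPU stuck at the same odd value needs the resched_cpu() prodding.
 */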
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
		       rsp->name, j - gpa,
		       rsp->gpnum, rsp->completed,
		       rsp->gp_flags,
		       gp_state_getname(rsp->gp_state), rsp->gp_state,
		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
		if (rsp->gp_kthread)
			sched_show_task(rsp->gp_kthread);
	}
}

/*
 * Dump stacks of all tasks running on stalled CPUs.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu))
					dump_cpu_task(rnp->grplo + cpu);
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
	int cpu;
	long delta;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu)) {
					print_cpu_stall_info(rsp,
							     rnp->grplo + cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks(rsp);
	} else {
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rsp->gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rsp->name, j - gpa, j, gpa,
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}

	/* Complain about tasks blocking the grace period. */
	rcu_print_detail_task_stall(rsp);

	rcu_check_gp_kthread_starvation(rsp);

	force_quiescent_state(rsp);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	print_cpu_stall_info(rsp, smp_processor_id());
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);

	rcu_check_gp_kthread_starvation(rsp);

	rcu_dump_cpu_stacks(rsp);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
		return;
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed.  These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = READ_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = READ_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = READ_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp, gpnum);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

3f5d3ea6 1445/*
d3f3f3f2
PM
1446 * Initialize the specified rcu_data structure's default callback list
1447 * to empty. The default callback list is the one that is not used by
1448 * no-callbacks CPUs.
3f5d3ea6 1449 */
d3f3f3f2 1450static void init_default_callback_list(struct rcu_data *rdp)
3f5d3ea6
PM
1451{
1452 int i;
1453
1454 rdp->nxtlist = NULL;
1455 for (i = 0; i < RCU_NEXT_SIZE; i++)
1456 rdp->nxttail[i] = &rdp->nxtlist;
1457}
1458
d3f3f3f2
PM
1459/*
1460 * Initialize the specified rcu_data structure's callback list to empty.
1461 */
1462static void init_callback_list(struct rcu_data *rdp)
1463{
1464 if (init_nocb_callback_list(rdp))
1465 return;
1466 init_default_callback_list(rdp);
1467}
1468
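/*
 * Editor's illustrative sketch, not part of the original file: with all
 * ->nxttail[] entries pointing at &rdp->nxtlist, every sublist is empty.
 * Enqueuing a callback at the RCU_NEXT_TAIL end then looks roughly like
 * the hypothetical helper below; the real enqueue path additionally
 * updates the length counters and does tracing.
 */
static inline void example_enqueue_cb(struct rcu_data *rdp,
				      struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;		/* Append at end of list. */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;	/* Tail now past new CB. */
}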
dc35c893
PM
1469/*
1470 * Determine the value that ->completed will have at the end of the
1471 * next subsequent grace period. This is used to tag callbacks so that
1472 * a CPU can invoke callbacks in a timely fashion even if that CPU has
1473 * been dyntick-idle for an extended period with callbacks under the
1474 * influence of RCU_FAST_NO_HZ.
1475 *
1476 * The caller must hold rnp->lock with interrupts disabled.
1477 */
1478static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1479 struct rcu_node *rnp)
1480{
1481 /*
1482 * If RCU is idle, we just wait for the next grace period.
1483 * But we can only be sure that RCU is idle if we are looking
1484 * at the root rcu_node structure -- otherwise, a new grace
1485 * period might have started, but just not yet gotten around
1486 * to initializing the current non-root rcu_node structure.
1487 */
1488 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1489 return rnp->completed + 1;
1490
1491 /*
1492 * Otherwise, wait for a possible partial grace period and
1493 * then the subsequent full grace period.
1494 */
1495 return rnp->completed + 2;
1496}
1497
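/*
 * Editor's illustrative sketch, not part of the original file: the
 * "+1 versus +2" rule above, restated with a hypothetical, simplified
 * signature (the real function works on rcu_state/rcu_node structures):
 */
static inline unsigned long example_cbs_completed(unsigned long completed,
						  unsigned long gpnum,
						  bool at_root)
{
	if (at_root && gpnum == completed)
		return completed + 1;	/* RCU idle: the next GP suffices. */
	return completed + 2;		/* Possible partial GP, then a full one. */
}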
0446be48
PM
1498/*
1499 * Trace-event helper function for rcu_start_future_gp() and
1500 * rcu_nocb_wait_gp().
1501 */
1502static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
e66c33d5 1503 unsigned long c, const char *s)
0446be48
PM
1504{
1505 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1506 rnp->completed, c, rnp->level,
1507 rnp->grplo, rnp->grphi, s);
1508}
1509
1510/*
1511 * Start some future grace period, as needed to handle newly arrived
1512 * callbacks. The required future grace periods are recorded in each
48a7639c
PM
1513 * rcu_node structure's ->need_future_gp field. Returns true if there
1514 * is reason to awaken the grace-period kthread.
0446be48
PM
1515 *
1516 * The caller must hold the specified rcu_node structure's ->lock.
1517 */
48a7639c
PM
1518static bool __maybe_unused
1519rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1520 unsigned long *c_out)
0446be48
PM
1521{
1522 unsigned long c;
1523 int i;
48a7639c 1524 bool ret = false;
0446be48
PM
1525 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1526
1527 /*
1528 * Pick up grace-period number for new callbacks. If this
1529 * grace period is already marked as needed, return to the caller.
1530 */
1531 c = rcu_cbs_completed(rdp->rsp, rnp);
f7f7bac9 1532 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
0446be48 1533 if (rnp->need_future_gp[c & 0x1]) {
f7f7bac9 1534 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
48a7639c 1535 goto out;
0446be48
PM
1536 }
1537
1538 /*
1539 * If either this rcu_node structure or the root rcu_node structure
1540 * believe that a grace period is in progress, then we must wait
1541 * for the one following, which is in "c". Because our request
1542 * will be noticed at the end of the current grace period, we don't
48bd8e9b
PK
1543 * need to explicitly start one. We only do the lockless check
1544 * of rnp_root's fields if the current rcu_node structure thinks
1545 * there is no grace period in flight, and because we hold rnp->lock,
1546 * the only possible change is when rnp_root's two fields are
1547 * equal, in which case rnp_root->gpnum might be concurrently
1548 * incremented. But that is OK, as it will just result in our
1549 * doing some extra useless work.
0446be48
PM
1550 */
1551 if (rnp->gpnum != rnp->completed ||
7d0ae808 1552 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
0446be48 1553 rnp->need_future_gp[c & 0x1]++;
f7f7bac9 1554 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
48a7639c 1555 goto out;
0446be48
PM
1556 }
1557
1558 /*
1559 * There might be no grace period in progress. If we don't already
1560 * hold it, acquire the root rcu_node structure's lock in order to
1561 * start one (if needed).
1562 */
2a67e741
PZ
1563 if (rnp != rnp_root)
1564 raw_spin_lock_rcu_node(rnp_root);
0446be48
PM
1565
1566 /*
1567 * Get a new grace-period number. If there really is no grace
1568 * period in progress, it will be smaller than the one we obtained
1569 * earlier. Adjust callbacks as needed. Note that even no-CBs
1570 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1571 */
1572 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1573 for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1574 if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1575 rdp->nxtcompleted[i] = c;
1576
1577 /*
 1578	 * If the need for the required grace period is already
1579 * recorded, trace and leave.
1580 */
1581 if (rnp_root->need_future_gp[c & 0x1]) {
f7f7bac9 1582 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
0446be48
PM
1583 goto unlock_out;
1584 }
1585
1586 /* Record the need for the future grace period. */
1587 rnp_root->need_future_gp[c & 0x1]++;
1588
1589 /* If a grace period is not already in progress, start one. */
1590 if (rnp_root->gpnum != rnp_root->completed) {
f7f7bac9 1591 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
0446be48 1592 } else {
f7f7bac9 1593 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
48a7639c 1594 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
0446be48
PM
1595 }
1596unlock_out:
1597 if (rnp != rnp_root)
1598 raw_spin_unlock(&rnp_root->lock);
48a7639c
PM
1599out:
1600 if (c_out != NULL)
1601 *c_out = c;
1602 return ret;
0446be48
PM
1603}
1604
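/*
 * Editor's note (illustrative, not in the original file): only the next
 * two grace periods can usefully be requested ahead of time, so the
 * ->need_future_gp[] array has just two counters and the low-order bit
 * of the target ->completed value selects the slot, as in the
 * hypothetical helper below.
 */
static inline int example_need_future_gp_idx(unsigned long c)
{
	return c & 0x1;		/* Requests alternate between GP "c" and GP "c + 1". */
}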
1605/*
1606 * Clean up any old requests for the just-ended grace period. Also return
1607 * whether any additional grace periods have been requested. Also invoke
1608 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1609 * waiting for this grace period to complete.
1610 */
1611static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1612{
1613 int c = rnp->completed;
1614 int needmore;
1615 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1616
1617 rcu_nocb_gp_cleanup(rsp, rnp);
1618 rnp->need_future_gp[c & 0x1] = 0;
1619 needmore = rnp->need_future_gp[(c + 1) & 0x1];
f7f7bac9
SRRH
1620 trace_rcu_future_gp(rnp, rdp, c,
1621 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
0446be48
PM
1622 return needmore;
1623}
1624
48a7639c
PM
1625/*
1626 * Awaken the grace-period kthread for the specified flavor of RCU.
1627 * Don't do a self-awaken, and don't bother awakening when there is
1628 * nothing for the grace-period kthread to do (as in several CPUs
1629 * raced to awaken, and we lost), and finally don't try to awaken
1630 * a kthread that has not yet been created.
1631 */
1632static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1633{
1634 if (current == rsp->gp_kthread ||
7d0ae808 1635 !READ_ONCE(rsp->gp_flags) ||
48a7639c
PM
1636 !rsp->gp_kthread)
1637 return;
1638 wake_up(&rsp->gp_wq);
1639}
1640
dc35c893
PM
1641/*
1642 * If there is room, assign a ->completed number to any callbacks on
1643 * this CPU that have not already been assigned. Also accelerate any
1644 * callbacks that were previously assigned a ->completed number that has
1645 * since proven to be too conservative, which can happen if callbacks get
1646 * assigned a ->completed number while RCU is idle, but with reference to
1647 * a non-root rcu_node structure. This function is idempotent, so it does
48a7639c
PM
 1648 * not hurt to call it repeatedly. Returns a flag saying that we should
1649 * awaken the RCU grace-period kthread.
dc35c893
PM
1650 *
1651 * The caller must hold rnp->lock with interrupts disabled.
1652 */
48a7639c 1653static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c893
PM
1654 struct rcu_data *rdp)
1655{
1656 unsigned long c;
1657 int i;
48a7639c 1658 bool ret;
dc35c893
PM
1659
1660 /* If the CPU has no callbacks, nothing to do. */
1661 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639c 1662 return false;
dc35c893
PM
1663
1664 /*
1665 * Starting from the sublist containing the callbacks most
1666 * recently assigned a ->completed number and working down, find the
1667 * first sublist that is not assignable to an upcoming grace period.
1668 * Such a sublist has something in it (first two tests) and has
1669 * a ->completed number assigned that will complete sooner than
1670 * the ->completed number for newly arrived callbacks (last test).
1671 *
1672 * The key point is that any later sublist can be assigned the
1673 * same ->completed number as the newly arrived callbacks, which
 1674	 * means that the callbacks in any of these later sublists can be
1675 * grouped into a single sublist, whether or not they have already
1676 * been assigned a ->completed number.
1677 */
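	/*
	 * Editor's worked example (illustrative, not in the original file):
	 * suppose c ends up being 104, the RCU_WAIT_TAIL sublist is non-empty
	 * and tagged 103, and the RCU_NEXT_READY_TAIL sublist is non-empty and
	 * already tagged 104.  The scan below stops at the WAIT sublist, so
	 * the NEXT_READY and NEXT sublists are merged and both tagged 104.
	 */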
1678 c = rcu_cbs_completed(rsp, rnp);
1679 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1680 if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1681 !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1682 break;
1683
1684 /*
 1685	 * If there is no sublist for unassigned callbacks, leave.
 1686	 * At the same time, advance "i" one sublist, so that "i" will
 1687	 * index the sublist into which all the remaining callbacks should
 1688	 * be grouped.
1689 */
1690 if (++i >= RCU_NEXT_TAIL)
48a7639c 1691 return false;
dc35c893
PM
1692
1693 /*
1694 * Assign all subsequent callbacks' ->completed number to the next
1695 * full grace period and group them all in the sublist initially
1696 * indexed by "i".
1697 */
1698 for (; i <= RCU_NEXT_TAIL; i++) {
1699 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1700 rdp->nxtcompleted[i] = c;
1701 }
910ee45d 1702 /* Record any needed additional grace periods. */
48a7639c 1703 ret = rcu_start_future_gp(rnp, rdp, NULL);
6d4b418c
PM
1704
1705 /* Trace depending on how much we were able to accelerate. */
1706 if (!*rdp->nxttail[RCU_WAIT_TAIL])
f7f7bac9 1707 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
6d4b418c 1708 else
f7f7bac9 1709 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
48a7639c 1710 return ret;
dc35c893
PM
1711}
1712
1713/*
1714 * Move any callbacks whose grace period has completed to the
1715 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1716 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1717 * sublist. This function is idempotent, so it does not hurt to
1718 * invoke it repeatedly. As long as it is not invoked -too- often...
48a7639c 1719 * Returns true if the RCU grace-period kthread needs to be awakened.
dc35c893
PM
1720 *
1721 * The caller must hold rnp->lock with interrupts disabled.
1722 */
48a7639c 1723static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c893
PM
1724 struct rcu_data *rdp)
1725{
1726 int i, j;
1727
1728 /* If the CPU has no callbacks, nothing to do. */
1729 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639c 1730 return false;
dc35c893
PM
1731
1732 /*
1733 * Find all callbacks whose ->completed numbers indicate that they
1734 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1735 */
1736 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1737 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1738 break;
1739 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1740 }
1741 /* Clean up any sublist tail pointers that were misordered above. */
1742 for (j = RCU_WAIT_TAIL; j < i; j++)
1743 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1744
1745 /* Copy down callbacks to fill in empty sublists. */
1746 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1747 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1748 break;
1749 rdp->nxttail[j] = rdp->nxttail[i];
1750 rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1751 }
1752
1753 /* Classify any remaining callbacks. */
48a7639c 1754 return rcu_accelerate_cbs(rsp, rnp, rdp);
dc35c893
PM
1755}
1756
d09b62df 1757/*
ba9fbe95
PM
1758 * Update CPU-local rcu_data state to record the beginnings and ends of
1759 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1760 * structure corresponding to the current CPU, and must have irqs disabled.
48a7639c 1761 * Returns true if the grace-period kthread needs to be awakened.
d09b62df 1762 */
48a7639c
PM
1763static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1764 struct rcu_data *rdp)
d09b62df 1765{
48a7639c
PM
1766 bool ret;
1767
ba9fbe95 1768 /* Handle the ends of any preceding grace periods first. */
e3663b10 1769 if (rdp->completed == rnp->completed &&
7d0ae808 1770 !unlikely(READ_ONCE(rdp->gpwrap))) {
d09b62df 1771
ba9fbe95 1772 /* No grace period end, so just accelerate recent callbacks. */
48a7639c 1773 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
d09b62df 1774
dc35c893
PM
1775 } else {
1776
1777 /* Advance callbacks. */
48a7639c 1778 ret = rcu_advance_cbs(rsp, rnp, rdp);
d09b62df
PM
1779
1780 /* Remember that we saw this grace-period completion. */
1781 rdp->completed = rnp->completed;
f7f7bac9 1782 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
d09b62df 1783 }
398ebe60 1784
7d0ae808 1785 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
6eaef633
PM
1786 /*
1787 * If the current grace period is waiting for this CPU,
1788 * set up to detect a quiescent state, otherwise don't
1789 * go looking for one.
1790 */
1791 rdp->gpnum = rnp->gpnum;
f7f7bac9 1792 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
5b74c458 1793 rdp->cpu_no_qs.b.norm = true;
5cd37193 1794 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
97c668b8 1795 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
6eaef633 1796 zero_cpu_stall_ticks(rdp);
7d0ae808 1797 WRITE_ONCE(rdp->gpwrap, false);
6eaef633 1798 }
48a7639c 1799 return ret;
6eaef633
PM
1800}
1801
d34ea322 1802static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
6eaef633
PM
1803{
1804 unsigned long flags;
48a7639c 1805 bool needwake;
6eaef633
PM
1806 struct rcu_node *rnp;
1807
1808 local_irq_save(flags);
1809 rnp = rdp->mynode;
7d0ae808
PM
1810 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1811 rdp->completed == READ_ONCE(rnp->completed) &&
1812 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
2a67e741 1813 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
6eaef633
PM
1814 local_irq_restore(flags);
1815 return;
1816 }
48a7639c 1817 needwake = __note_gp_changes(rsp, rnp, rdp);
6eaef633 1818 raw_spin_unlock_irqrestore(&rnp->lock, flags);
48a7639c
PM
1819 if (needwake)
1820 rcu_gp_kthread_wake(rsp);
6eaef633
PM
1821}
1822
0f41c0dd
PM
1823static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1824{
1825 if (delay > 0 &&
1826 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1827 schedule_timeout_uninterruptible(delay);
1828}
1829
b3dbec76 1830/*
45fed3e7 1831 * Initialize a new grace period. Return false if no grace period required.
b3dbec76 1832 */
45fed3e7 1833static bool rcu_gp_init(struct rcu_state *rsp)
b3dbec76 1834{
0aa04b05 1835 unsigned long oldmask;
b3dbec76 1836 struct rcu_data *rdp;
7fdefc10 1837 struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76 1838
7d0ae808 1839 WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741 1840 raw_spin_lock_irq_rcu_node(rnp);
7d0ae808 1841 if (!READ_ONCE(rsp->gp_flags)) {
f7be8209
PM
1842 /* Spurious wakeup, tell caller to go back to sleep. */
1843 raw_spin_unlock_irq(&rnp->lock);
45fed3e7 1844 return false;
f7be8209 1845 }
7d0ae808 1846 WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
b3dbec76 1847
f7be8209
PM
1848 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1849 /*
1850 * Grace period already in progress, don't start another.
1851 * Not supposed to be able to happen.
1852 */
7fdefc10 1853 raw_spin_unlock_irq(&rnp->lock);
45fed3e7 1854 return false;
7fdefc10
PM
1855 }
1856
7fdefc10 1857 /* Advance to a new grace period and initialize state. */
26cdfedf 1858 record_gp_stall_check_time(rsp);
765a3f4f
PM
1859 /* Record GP times before starting GP, hence smp_store_release(). */
1860 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
f7f7bac9 1861 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
7fdefc10
PM
1862 raw_spin_unlock_irq(&rnp->lock);
1863
0aa04b05
PM
1864 /*
1865 * Apply per-leaf buffered online and offline operations to the
1866 * rcu_node tree. Note that this new grace period need not wait
1867 * for subsequent online CPUs, and that quiescent-state forcing
1868 * will handle subsequent offline CPUs.
1869 */
1870 rcu_for_each_leaf_node(rsp, rnp) {
0f41c0dd 1871 rcu_gp_slow(rsp, gp_preinit_delay);
2a67e741 1872 raw_spin_lock_irq_rcu_node(rnp);
0aa04b05
PM
1873 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1874 !rnp->wait_blkd_tasks) {
1875 /* Nothing to do on this leaf rcu_node structure. */
1876 raw_spin_unlock_irq(&rnp->lock);
1877 continue;
1878 }
1879
1880 /* Record old state, apply changes to ->qsmaskinit field. */
1881 oldmask = rnp->qsmaskinit;
1882 rnp->qsmaskinit = rnp->qsmaskinitnext;
1883
1884 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1885 if (!oldmask != !rnp->qsmaskinit) {
1886 if (!oldmask) /* First online CPU for this rcu_node. */
1887 rcu_init_new_rnp(rnp);
1888 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
1889 rnp->wait_blkd_tasks = true;
1890 else /* Last offline CPU and can propagate. */
1891 rcu_cleanup_dead_rnp(rnp);
1892 }
1893
1894 /*
1895 * If all waited-on tasks from prior grace period are
1896 * done, and if all this rcu_node structure's CPUs are
1897 * still offline, propagate up the rcu_node tree and
1898 * clear ->wait_blkd_tasks. Otherwise, if one of this
1899 * rcu_node structure's CPUs has since come back online,
1900 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
1901 * checks for this, so just call it unconditionally).
1902 */
1903 if (rnp->wait_blkd_tasks &&
1904 (!rcu_preempt_has_tasks(rnp) ||
1905 rnp->qsmaskinit)) {
1906 rnp->wait_blkd_tasks = false;
1907 rcu_cleanup_dead_rnp(rnp);
1908 }
1909
1910 raw_spin_unlock_irq(&rnp->lock);
1911 }
7fdefc10
PM
1912
1913 /*
1914 * Set the quiescent-state-needed bits in all the rcu_node
1915 * structures for all currently online CPUs in breadth-first order,
1916 * starting from the root rcu_node structure, relying on the layout
1917 * of the tree within the rsp->node[] array. Note that other CPUs
1918 * will access only the leaves of the hierarchy, thus seeing that no
1919 * grace period is in progress, at least until the corresponding
1920 * leaf node has been initialized. In addition, we have excluded
1921 * CPU-hotplug operations.
1922 *
1923 * The grace period cannot complete until the initialization
1924 * process finishes, because this kthread handles both.
1925 */
1926 rcu_for_each_node_breadth_first(rsp, rnp) {
0f41c0dd 1927 rcu_gp_slow(rsp, gp_init_delay);
2a67e741 1928 raw_spin_lock_irq_rcu_node(rnp);
b3dbec76 1929 rdp = this_cpu_ptr(rsp->rda);
7fdefc10
PM
1930 rcu_preempt_check_blocked_tasks(rnp);
1931 rnp->qsmask = rnp->qsmaskinit;
7d0ae808 1932 WRITE_ONCE(rnp->gpnum, rsp->gpnum);
3f47da0f 1933 if (WARN_ON_ONCE(rnp->completed != rsp->completed))
7d0ae808 1934 WRITE_ONCE(rnp->completed, rsp->completed);
7fdefc10 1935 if (rnp == rdp->mynode)
48a7639c 1936 (void)__note_gp_changes(rsp, rnp, rdp);
7fdefc10
PM
1937 rcu_preempt_boost_start_gp(rnp);
1938 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1939 rnp->level, rnp->grplo,
1940 rnp->grphi, rnp->qsmask);
1941 raw_spin_unlock_irq(&rnp->lock);
bde6c3aa 1942 cond_resched_rcu_qs();
7d0ae808 1943 WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10 1944 }
b3dbec76 1945
45fed3e7 1946 return true;
7fdefc10 1947}
b3dbec76 1948
b9a425cf
PM
1949/*
1950 * Helper function for wait_event_interruptible_timeout() wakeup
1951 * at force-quiescent-state time.
1952 */
1953static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
1954{
1955 struct rcu_node *rnp = rcu_get_root(rsp);
1956
1957 /* Someone like call_rcu() requested a force-quiescent-state scan. */
1958 *gfp = READ_ONCE(rsp->gp_flags);
1959 if (*gfp & RCU_GP_FLAG_FQS)
1960 return true;
1961
1962 /* The current grace period has completed. */
1963 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1964 return true;
1965
1966 return false;
1967}
1968
4cdfc175
PM
1969/*
1970 * Do one round of quiescent-state forcing.
1971 */
77f81fe0 1972static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
4cdfc175 1973{
217af2a2
PM
1974 bool isidle = false;
1975 unsigned long maxj;
4cdfc175
PM
1976 struct rcu_node *rnp = rcu_get_root(rsp);
1977
7d0ae808 1978 WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175 1979 rsp->n_force_qs++;
77f81fe0 1980 if (first_time) {
4cdfc175 1981 /* Collect dyntick-idle snapshots. */
0edd1b17 1982 if (is_sysidle_rcu_state(rsp)) {
e02b2edf 1983 isidle = true;
0edd1b17
PM
1984 maxj = jiffies - ULONG_MAX / 4;
1985 }
217af2a2
PM
1986 force_qs_rnp(rsp, dyntick_save_progress_counter,
1987 &isidle, &maxj);
0edd1b17 1988 rcu_sysidle_report_gp(rsp, isidle, maxj);
4cdfc175
PM
1989 } else {
1990 /* Handle dyntick-idle and offline CPUs. */
675da67f 1991 isidle = true;
217af2a2 1992 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
4cdfc175
PM
1993 }
1994 /* Clear flag to prevent immediate re-entry. */
7d0ae808 1995 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2a67e741 1996 raw_spin_lock_irq_rcu_node(rnp);
7d0ae808
PM
1997 WRITE_ONCE(rsp->gp_flags,
1998 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
4cdfc175
PM
1999 raw_spin_unlock_irq(&rnp->lock);
2000 }
4cdfc175
PM
2001}
2002
7fdefc10
PM
2003/*
2004 * Clean up after the old grace period.
2005 */
4cdfc175 2006static void rcu_gp_cleanup(struct rcu_state *rsp)
7fdefc10
PM
2007{
2008 unsigned long gp_duration;
48a7639c 2009 bool needgp = false;
dae6e64d 2010 int nocb = 0;
7fdefc10
PM
2011 struct rcu_data *rdp;
2012 struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76 2013
7d0ae808 2014 WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741 2015 raw_spin_lock_irq_rcu_node(rnp);
7fdefc10
PM
2016 gp_duration = jiffies - rsp->gp_start;
2017 if (gp_duration > rsp->gp_max)
2018 rsp->gp_max = gp_duration;
b3dbec76 2019
7fdefc10
PM
2020 /*
2021 * We know the grace period is complete, but to everyone else
2022 * it appears to still be ongoing. But it is also the case
2023 * that to everyone else it looks like there is nothing that
2024 * they can do to advance the grace period. It is therefore
2025 * safe for us to drop the lock in order to mark the grace
2026 * period as completed in all of the rcu_node structures.
7fdefc10 2027 */
5d4b8659 2028 raw_spin_unlock_irq(&rnp->lock);
b3dbec76 2029
5d4b8659
PM
2030 /*
2031 * Propagate new ->completed value to rcu_node structures so
2032 * that other CPUs don't have to wait until the start of the next
2033 * grace period to process their callbacks. This also avoids
2034 * some nasty RCU grace-period initialization races by forcing
2035 * the end of the current grace period to be completely recorded in
2036 * all of the rcu_node structures before the beginning of the next
2037 * grace period is recorded in any of the rcu_node structures.
2038 */
2039 rcu_for_each_node_breadth_first(rsp, rnp) {
2a67e741 2040 raw_spin_lock_irq_rcu_node(rnp);
5c60d25f
PM
2041 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
2042 WARN_ON_ONCE(rnp->qsmask);
7d0ae808 2043 WRITE_ONCE(rnp->completed, rsp->gpnum);
b11cc576
PM
2044 rdp = this_cpu_ptr(rsp->rda);
2045 if (rnp == rdp->mynode)
48a7639c 2046 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
78e4bc34 2047 /* smp_mb() provided by prior unlock-lock pair. */
0446be48 2048 nocb += rcu_future_gp_cleanup(rsp, rnp);
5d4b8659 2049 raw_spin_unlock_irq(&rnp->lock);
bde6c3aa 2050 cond_resched_rcu_qs();
7d0ae808 2051 WRITE_ONCE(rsp->gp_activity, jiffies);
0f41c0dd 2052 rcu_gp_slow(rsp, gp_cleanup_delay);
7fdefc10 2053 }
5d4b8659 2054 rnp = rcu_get_root(rsp);
2a67e741 2055 raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
dae6e64d 2056 rcu_nocb_gp_set(rnp, nocb);
7fdefc10 2057
765a3f4f 2058 /* Declare grace period done. */
7d0ae808 2059 WRITE_ONCE(rsp->completed, rsp->gpnum);
f7f7bac9 2060 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
77f81fe0 2061 rsp->gp_state = RCU_GP_IDLE;
5d4b8659 2062 rdp = this_cpu_ptr(rsp->rda);
48a7639c
PM
2063 /* Advance CBs to reduce false positives below. */
2064 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
2065 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
7d0ae808 2066 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
bb311ecc 2067 trace_rcu_grace_period(rsp->name,
7d0ae808 2068 READ_ONCE(rsp->gpnum),
bb311ecc
PM
2069 TPS("newreq"));
2070 }
7fdefc10 2071 raw_spin_unlock_irq(&rnp->lock);
7fdefc10
PM
2072}
2073
2074/*
2075 * Body of kthread that handles grace periods.
2076 */
2077static int __noreturn rcu_gp_kthread(void *arg)
2078{
77f81fe0 2079 bool first_gp_fqs;
88d6df61 2080 int gf;
d40011f6 2081 unsigned long j;
4cdfc175 2082 int ret;
7fdefc10
PM
2083 struct rcu_state *rsp = arg;
2084 struct rcu_node *rnp = rcu_get_root(rsp);
2085
5871968d 2086 rcu_bind_gp_kthread();
7fdefc10
PM
2087 for (;;) {
2088
2089 /* Handle grace-period start. */
2090 for (;;) {
63c4db78 2091 trace_rcu_grace_period(rsp->name,
7d0ae808 2092 READ_ONCE(rsp->gpnum),
63c4db78 2093 TPS("reqwait"));
afea227f 2094 rsp->gp_state = RCU_GP_WAIT_GPS;
4cdfc175 2095 wait_event_interruptible(rsp->gp_wq,
7d0ae808 2096 READ_ONCE(rsp->gp_flags) &
4cdfc175 2097 RCU_GP_FLAG_INIT);
319362c9 2098 rsp->gp_state = RCU_GP_DONE_GPS;
78e4bc34 2099 /* Locking provides needed memory barrier. */
f7be8209 2100 if (rcu_gp_init(rsp))
7fdefc10 2101 break;
bde6c3aa 2102 cond_resched_rcu_qs();
7d0ae808 2103 WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd 2104 WARN_ON(signal_pending(current));
63c4db78 2105 trace_rcu_grace_period(rsp->name,
7d0ae808 2106 READ_ONCE(rsp->gpnum),
63c4db78 2107 TPS("reqwaitsig"));
7fdefc10 2108 }
cabc49c1 2109
4cdfc175 2110 /* Handle quiescent-state forcing. */
77f81fe0 2111 first_gp_fqs = true;
d40011f6
PM
2112 j = jiffies_till_first_fqs;
2113 if (j > HZ) {
2114 j = HZ;
2115 jiffies_till_first_fqs = HZ;
2116 }
88d6df61 2117 ret = 0;
cabc49c1 2118 for (;;) {
88d6df61
PM
2119 if (!ret)
2120 rsp->jiffies_force_qs = jiffies + j;
63c4db78 2121 trace_rcu_grace_period(rsp->name,
7d0ae808 2122 READ_ONCE(rsp->gpnum),
63c4db78 2123 TPS("fqswait"));
afea227f 2124 rsp->gp_state = RCU_GP_WAIT_FQS;
4cdfc175 2125 ret = wait_event_interruptible_timeout(rsp->gp_wq,
b9a425cf 2126 rcu_gp_fqs_check_wake(rsp, &gf), j);
32bb1c79 2127 rsp->gp_state = RCU_GP_DOING_FQS;
78e4bc34 2128 /* Locking provides needed memory barriers. */
4cdfc175 2129 /* If grace period done, leave loop. */
7d0ae808 2130 if (!READ_ONCE(rnp->qsmask) &&
4cdfc175 2131 !rcu_preempt_blocked_readers_cgp(rnp))
cabc49c1 2132 break;
4cdfc175 2133 /* If time for quiescent-state forcing, do it. */
88d6df61
PM
2134 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2135 (gf & RCU_GP_FLAG_FQS)) {
63c4db78 2136 trace_rcu_grace_period(rsp->name,
7d0ae808 2137 READ_ONCE(rsp->gpnum),
63c4db78 2138 TPS("fqsstart"));
77f81fe0
PM
2139 rcu_gp_fqs(rsp, first_gp_fqs);
2140 first_gp_fqs = false;
63c4db78 2141 trace_rcu_grace_period(rsp->name,
7d0ae808 2142 READ_ONCE(rsp->gpnum),
63c4db78 2143 TPS("fqsend"));
bde6c3aa 2144 cond_resched_rcu_qs();
7d0ae808 2145 WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175
PM
2146 } else {
2147 /* Deal with stray signal. */
bde6c3aa 2148 cond_resched_rcu_qs();
7d0ae808 2149 WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd 2150 WARN_ON(signal_pending(current));
63c4db78 2151 trace_rcu_grace_period(rsp->name,
7d0ae808 2152 READ_ONCE(rsp->gpnum),
63c4db78 2153 TPS("fqswaitsig"));
4cdfc175 2154 }
d40011f6
PM
2155 j = jiffies_till_next_fqs;
2156 if (j > HZ) {
2157 j = HZ;
2158 jiffies_till_next_fqs = HZ;
2159 } else if (j < 1) {
2160 j = 1;
2161 jiffies_till_next_fqs = 1;
2162 }
cabc49c1 2163 }
4cdfc175
PM
2164
2165 /* Handle grace-period end. */
319362c9 2166 rsp->gp_state = RCU_GP_CLEANUP;
4cdfc175 2167 rcu_gp_cleanup(rsp);
319362c9 2168 rsp->gp_state = RCU_GP_CLEANED;
b3dbec76 2169 }
b3dbec76
PM
2170}
2171
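/*
 * Editor's note (illustrative, not in the original file): the kthread
 * above walks rsp->gp_state through roughly the following cycle, with
 * the two force-quiescent-state states repeating until the grace
 * period completes:
 *
 *   RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS ->
 *     { RCU_GP_WAIT_FQS -> RCU_GP_DOING_FQS }* ->
 *   RCU_GP_CLEANUP -> RCU_GP_CLEANED -> RCU_GP_WAIT_GPS -> ...
 *
 * (RCU_GP_IDLE is set in rcu_gp_cleanup() once the grace period is over.)
 */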
64db4cff
PM
2172/*
2173 * Start a new RCU grace period if warranted, re-initializing the hierarchy
2174 * in preparation for detecting the next grace period. The caller must hold
b8462084 2175 * the root node's ->lock and hard irqs must be disabled.
e5601400
PM
2176 *
2177 * Note that it is legal for a dying CPU (which is marked as offline) to
2178 * invoke this function. This can happen when the dying CPU reports its
2179 * quiescent state.
48a7639c
PM
2180 *
2181 * Returns true if the grace-period kthread must be awakened.
64db4cff 2182 */
48a7639c 2183static bool
910ee45d
PM
2184rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2185 struct rcu_data *rdp)
64db4cff 2186{
b8462084 2187 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
afe24b12 2188 /*
b3dbec76 2189 * Either we have not yet spawned the grace-period
62da1921
PM
2190 * task, this CPU does not need another grace period,
2191 * or a grace period is already in progress.
b3dbec76 2192 * Either way, don't start a new grace period.
afe24b12 2193 */
48a7639c 2194 return false;
afe24b12 2195 }
7d0ae808
PM
2196 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2197 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
bb311ecc 2198 TPS("newreq"));
62da1921 2199
016a8d5b
SR
2200 /*
2201 * We can't do wakeups while holding the rnp->lock, as that
1eafd31c 2202 * could cause possible deadlocks with the rq->lock. Defer
48a7639c 2203 * the wakeup to our caller.
016a8d5b 2204 */
48a7639c 2205 return true;
64db4cff
PM
2206}
2207
910ee45d
PM
2208/*
2209 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
2210 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it
2211 * is invoked indirectly from rcu_advance_cbs(), which would result in
2212 * endless recursion -- or would do so if it wasn't for the self-deadlock
2213 * that is encountered beforehand.
48a7639c
PM
2214 *
2215 * Returns true if the grace-period kthread needs to be awakened.
910ee45d 2216 */
48a7639c 2217static bool rcu_start_gp(struct rcu_state *rsp)
910ee45d
PM
2218{
2219 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2220 struct rcu_node *rnp = rcu_get_root(rsp);
48a7639c 2221 bool ret = false;
910ee45d
PM
2222
2223 /*
2224 * If there is no grace period in progress right now, any
2225 * callbacks we have up to this point will be satisfied by the
2226 * next grace period. Also, advancing the callbacks reduces the
2227 * probability of false positives from cpu_needs_another_gp()
2228 * resulting in pointless grace periods. So, advance callbacks
2229 * then start the grace period!
2230 */
48a7639c
PM
2231 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2232 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2233 return ret;
910ee45d
PM
2234}
2235
f41d911f 2236/*
d3f6bad3
PM
2237 * Report a full set of quiescent states to the specified rcu_state
2238 * data structure. This involves cleaning up after the prior grace
2239 * period and letting rcu_start_gp() start up the next grace period
b8462084
PM
2240 * if one is needed. Note that the caller must hold rnp->lock, which
2241 * is released before return.
f41d911f 2242 */
d3f6bad3 2243static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
fc2219d4 2244 __releases(rcu_get_root(rsp)->lock)
f41d911f 2245{
fc2219d4 2246 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
cd73ca21 2247 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
cabc49c1 2248 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2aa792e6 2249 rcu_gp_kthread_wake(rsp);
f41d911f
PM
2250}
2251
64db4cff 2252/*
d3f6bad3
PM
2253 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2254 * Allows quiescent states for a group of CPUs to be reported at one go
2255 * to the specified rcu_node structure, though all the CPUs in the group
654e9533
PM
2256 * must be represented by the same rcu_node structure (which need not be a
2257 * leaf rcu_node structure, though it often will be). The gps parameter
2258 * is the grace-period snapshot, which means that the quiescent states
2259 * are valid only if rnp->gpnum is equal to gps. That structure's lock
2260 * must be held upon entry, and it is released before return.
64db4cff
PM
2261 */
2262static void
d3f6bad3 2263rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
654e9533 2264 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
64db4cff
PM
2265 __releases(rnp->lock)
2266{
654e9533 2267 unsigned long oldmask = 0;
28ecd580
PM
2268 struct rcu_node *rnp_c;
2269
64db4cff
PM
2270 /* Walk up the rcu_node hierarchy. */
2271 for (;;) {
654e9533 2272 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
64db4cff 2273
654e9533
PM
2274 /*
2275 * Our bit has already been cleared, or the
2276 * relevant grace period is already over, so done.
2277 */
1304afb2 2278 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2279 return;
2280 }
654e9533 2281 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
64db4cff 2282 rnp->qsmask &= ~mask;
d4c08f2a
PM
2283 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2284 mask, rnp->qsmask, rnp->level,
2285 rnp->grplo, rnp->grphi,
2286 !!rnp->gp_tasks);
27f4d280 2287 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
64db4cff
PM
2288
2289 /* Other bits still set at this level, so done. */
1304afb2 2290 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2291 return;
2292 }
2293 mask = rnp->grpmask;
2294 if (rnp->parent == NULL) {
2295
2296 /* No more levels. Exit loop holding root lock. */
2297
2298 break;
2299 }
1304afb2 2300 raw_spin_unlock_irqrestore(&rnp->lock, flags);
28ecd580 2301 rnp_c = rnp;
64db4cff 2302 rnp = rnp->parent;
2a67e741 2303 raw_spin_lock_irqsave_rcu_node(rnp, flags);
654e9533 2304 oldmask = rnp_c->qsmask;
64db4cff
PM
2305 }
2306
2307 /*
2308 * Get here if we are the last CPU to pass through a quiescent
d3f6bad3 2309 * state for this grace period. Invoke rcu_report_qs_rsp()
f41d911f 2310 * to clean up and start the next grace period if one is needed.
64db4cff 2311 */
d3f6bad3 2312 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
64db4cff
PM
2313}
2314
cc99a310
PM
2315/*
2316 * Record a quiescent state for all tasks that were previously queued
2317 * on the specified rcu_node structure and that were blocking the current
2318 * RCU grace period. The caller must hold the specified rnp->lock with
2319 * irqs disabled, and this lock is released upon return, but irqs remain
2320 * disabled.
2321 */
0aa04b05 2322static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
cc99a310
PM
2323 struct rcu_node *rnp, unsigned long flags)
2324 __releases(rnp->lock)
2325{
654e9533 2326 unsigned long gps;
cc99a310
PM
2327 unsigned long mask;
2328 struct rcu_node *rnp_p;
2329
a77da14c
PM
2330 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2331 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
cc99a310
PM
2332 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2333 return; /* Still need more quiescent states! */
2334 }
2335
2336 rnp_p = rnp->parent;
2337 if (rnp_p == NULL) {
2338 /*
a77da14c
PM
2339 * Only one rcu_node structure in the tree, so don't
2340 * try to report up to its nonexistent parent!
cc99a310
PM
2341 */
2342 rcu_report_qs_rsp(rsp, flags);
2343 return;
2344 }
2345
654e9533
PM
2346 /* Report up the rest of the hierarchy, tracking current ->gpnum. */
2347 gps = rnp->gpnum;
cc99a310
PM
2348 mask = rnp->grpmask;
2349 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2a67e741 2350 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
654e9533 2351 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
cc99a310
PM
2352}
2353
64db4cff 2354/*
d3f6bad3
PM
2355 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2356 * structure. This must be either called from the specified CPU, or
2357 * called when the specified CPU is known to be offline (and when it is
2358 * also known that no other CPU is concurrently trying to help the offline
 2359 * CPU). The rdp->gpnum snapshot is used to make sure we are still in the
2360 * grace period of interest. We don't want to end the current grace period
2361 * based on quiescent states detected in an earlier grace period!
64db4cff
PM
2362 */
2363static void
d7d6a11e 2364rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
64db4cff
PM
2365{
2366 unsigned long flags;
2367 unsigned long mask;
48a7639c 2368 bool needwake;
64db4cff
PM
2369 struct rcu_node *rnp;
2370
2371 rnp = rdp->mynode;
2a67e741 2372 raw_spin_lock_irqsave_rcu_node(rnp, flags);
5b74c458 2373 if ((rdp->cpu_no_qs.b.norm &&
5cd37193
PM
2374 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2375 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2376 rdp->gpwrap) {
64db4cff
PM
2377
2378 /*
e4cc1f22
PM
2379 * The grace period in which this quiescent state was
2380 * recorded has ended, so don't report it upwards.
2381 * We will instead need a new quiescent state that lies
2382 * within the current grace period.
64db4cff 2383 */
5b74c458 2384 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
5cd37193 2385 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1304afb2 2386 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
2387 return;
2388 }
2389 mask = rdp->grpmask;
2390 if ((rnp->qsmask & mask) == 0) {
1304afb2 2391 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff 2392 } else {
97c668b8 2393 rdp->core_needs_qs = 0;
64db4cff
PM
2394
2395 /*
2396 * This GP can't end until cpu checks in, so all of our
2397 * callbacks can be processed during the next GP.
2398 */
48a7639c 2399 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
64db4cff 2400
654e9533
PM
2401 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2402 /* ^^^ Released rnp->lock */
48a7639c
PM
2403 if (needwake)
2404 rcu_gp_kthread_wake(rsp);
64db4cff
PM
2405 }
2406}
2407
2408/*
2409 * Check to see if there is a new grace period of which this CPU
2410 * is not yet aware, and if so, set up local rcu_data state for it.
2411 * Otherwise, see if this CPU has just passed through its first
2412 * quiescent state for this grace period, and record that fact if so.
2413 */
2414static void
2415rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2416{
05eb552b
PM
2417 /* Check for grace-period ends and beginnings. */
2418 note_gp_changes(rsp, rdp);
64db4cff
PM
2419
2420 /*
2421 * Does this CPU still need to do its part for current grace period?
2422 * If no, return and let the other CPUs do their part as well.
2423 */
97c668b8 2424 if (!rdp->core_needs_qs)
64db4cff
PM
2425 return;
2426
2427 /*
2428 * Was there a quiescent state since the beginning of the grace
2429 * period? If no, then exit and wait for the next call.
2430 */
5b74c458 2431 if (rdp->cpu_no_qs.b.norm &&
5cd37193 2432 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
64db4cff
PM
2433 return;
2434
d3f6bad3
PM
2435 /*
2436 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2437 * judge of that).
2438 */
d7d6a11e 2439 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
64db4cff
PM
2440}
2441
e74f4c45 2442/*
b1420f1c
PM
2443 * Send the specified CPU's RCU callbacks to the orphanage. The
2444 * specified CPU must be offline, and the caller must hold the
7b2e6011 2445 * ->orphan_lock.
e74f4c45 2446 */
b1420f1c
PM
2447static void
2448rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2449 struct rcu_node *rnp, struct rcu_data *rdp)
e74f4c45 2450{
3fbfbf7a 2451 /* No-CBs CPUs do not have orphanable callbacks. */
ea46351c 2452 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
3fbfbf7a
PM
2453 return;
2454
b1420f1c
PM
2455 /*
2456 * Orphan the callbacks. First adjust the counts. This is safe
abfd6e58
PM
2457 * because _rcu_barrier() excludes CPU-hotplug operations, so it
2458 * cannot be running now. Thus no memory barrier is required.
b1420f1c 2459 */
a50c3af9 2460 if (rdp->nxtlist != NULL) {
b1420f1c
PM
2461 rsp->qlen_lazy += rdp->qlen_lazy;
2462 rsp->qlen += rdp->qlen;
2463 rdp->n_cbs_orphaned += rdp->qlen;
a50c3af9 2464 rdp->qlen_lazy = 0;
7d0ae808 2465 WRITE_ONCE(rdp->qlen, 0);
a50c3af9
PM
2466 }
2467
2468 /*
b1420f1c
PM
2469 * Next, move those callbacks still needing a grace period to
2470 * the orphanage, where some other CPU will pick them up.
2471 * Some of the callbacks might have gone partway through a grace
2472 * period, but that is too bad. They get to start over because we
2473 * cannot assume that grace periods are synchronized across CPUs.
2474 * We don't bother updating the ->nxttail[] array yet, instead
2475 * we just reset the whole thing later on.
a50c3af9 2476 */
b1420f1c
PM
2477 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2478 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2479 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2480 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
a50c3af9
PM
2481 }
2482
2483 /*
b1420f1c
PM
2484 * Then move the ready-to-invoke callbacks to the orphanage,
2485 * where some other CPU will pick them up. These will not be
 2486	 * required to pass through another grace period: They are done.
a50c3af9 2487 */
e5601400 2488 if (rdp->nxtlist != NULL) {
b1420f1c
PM
2489 *rsp->orphan_donetail = rdp->nxtlist;
2490 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
e5601400 2491 }
e74f4c45 2492
b33078b6
PM
2493 /*
2494 * Finally, initialize the rcu_data structure's list to empty and
2495 * disallow further callbacks on this CPU.
2496 */
3f5d3ea6 2497 init_callback_list(rdp);
b33078b6 2498 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
b1420f1c
PM
2499}
2500
2501/*
2502 * Adopt the RCU callbacks from the specified rcu_state structure's
7b2e6011 2503 * orphanage. The caller must hold the ->orphan_lock.
b1420f1c 2504 */
96d3fd0d 2505static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
b1420f1c
PM
2506{
2507 int i;
fa07a58f 2508 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
b1420f1c 2509
3fbfbf7a 2510 /* No-CBs CPUs are handled specially. */
ea46351c
PM
2511 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2512 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
3fbfbf7a
PM
2513 return;
2514
b1420f1c
PM
2515 /* Do the accounting first. */
2516 rdp->qlen_lazy += rsp->qlen_lazy;
2517 rdp->qlen += rsp->qlen;
2518 rdp->n_cbs_adopted += rsp->qlen;
8f5af6f1
PM
2519 if (rsp->qlen_lazy != rsp->qlen)
2520 rcu_idle_count_callbacks_posted();
b1420f1c
PM
2521 rsp->qlen_lazy = 0;
2522 rsp->qlen = 0;
2523
2524 /*
2525 * We do not need a memory barrier here because the only way we
 2526	 * can get here while an rcu_barrier() is in flight is if
2527 * we are the task doing the rcu_barrier().
2528 */
2529
2530 /* First adopt the ready-to-invoke callbacks. */
2531 if (rsp->orphan_donelist != NULL) {
2532 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2533 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2534 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2535 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2536 rdp->nxttail[i] = rsp->orphan_donetail;
2537 rsp->orphan_donelist = NULL;
2538 rsp->orphan_donetail = &rsp->orphan_donelist;
2539 }
2540
2541 /* And then adopt the callbacks that still need a grace period. */
2542 if (rsp->orphan_nxtlist != NULL) {
2543 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2544 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2545 rsp->orphan_nxtlist = NULL;
2546 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2547 }
2548}
2549
2550/*
2551 * Trace the fact that this CPU is going offline.
2552 */
2553static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2554{
2555 RCU_TRACE(unsigned long mask);
2556 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2557 RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2558
ea46351c
PM
2559 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2560 return;
2561
b1420f1c 2562 RCU_TRACE(mask = rdp->grpmask);
e5601400
PM
2563 trace_rcu_grace_period(rsp->name,
2564 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
f7f7bac9 2565 TPS("cpuofl"));
64db4cff
PM
2566}
2567
8af3a5e7
PM
2568/*
2569 * All CPUs for the specified rcu_node structure have gone offline,
2570 * and all tasks that were preempted within an RCU read-side critical
2571 * section while running on one of those CPUs have since exited their RCU
2572 * read-side critical section. Some other CPU is reporting this fact with
2573 * the specified rcu_node structure's ->lock held and interrupts disabled.
2574 * This function therefore goes up the tree of rcu_node structures,
2575 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2576 * the leaf rcu_node structure's ->qsmaskinit field has already been
 2577 * updated.
2578 *
2579 * This function does check that the specified rcu_node structure has
2580 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2581 * prematurely. That said, invoking it after the fact will cost you
2582 * a needless lock acquisition. So once it has done its work, don't
2583 * invoke it again.
2584 */
2585static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2586{
2587 long mask;
2588 struct rcu_node *rnp = rnp_leaf;
2589
ea46351c
PM
2590 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2591 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
8af3a5e7
PM
2592 return;
2593 for (;;) {
2594 mask = rnp->grpmask;
2595 rnp = rnp->parent;
2596 if (!rnp)
2597 break;
2a67e741 2598 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
8af3a5e7 2599 rnp->qsmaskinit &= ~mask;
0aa04b05 2600 rnp->qsmask &= ~mask;
8af3a5e7
PM
2601 if (rnp->qsmaskinit) {
2602 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2603 return;
2604 }
2605 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2606 }
2607}
2608
88428cc5
PM
2609/*
2610 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
2611 * function. We now remove it from the rcu_node tree's ->qsmaskinit
2612 * bit masks.
2613 */
2614static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
2615{
2616 unsigned long flags;
2617 unsigned long mask;
2618 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2619 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2620
ea46351c
PM
2621 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2622 return;
2623
88428cc5
PM
2624 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2625 mask = rdp->grpmask;
2a67e741 2626 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
88428cc5
PM
2627 rnp->qsmaskinitnext &= ~mask;
2628 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2629}
2630
64db4cff 2631/*
e5601400 2632 * The CPU has been completely removed, and some other CPU is reporting
b1420f1c
PM
2633 * this fact from process context. Do the remainder of the cleanup,
2634 * including orphaning the outgoing CPU's RCU callbacks, and also
1331e7a1
PM
2635 * adopting them. There can only be one CPU hotplug operation at a time,
2636 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
64db4cff 2637 */
e5601400 2638static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
64db4cff 2639{
2036d94a 2640 unsigned long flags;
e5601400 2641 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
b1420f1c 2642 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
e5601400 2643
ea46351c
PM
2644 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2645 return;
2646
2036d94a 2647 /* Adjust any no-longer-needed kthreads. */
5d01bbd1 2648 rcu_boost_kthread_setaffinity(rnp, -1);
2036d94a 2649
b1420f1c 2650 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
78043c46 2651 raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
b1420f1c 2652 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
96d3fd0d 2653 rcu_adopt_orphan_cbs(rsp, flags);
a8f4cbad 2654 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
b1420f1c 2655
cf01537e
PM
2656 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2657 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2658 cpu, rdp->qlen, rdp->nxtlist);
64db4cff
PM
2659}
2660
64db4cff
PM
2661/*
2662 * Invoke any RCU callbacks that have made it to the end of their grace
 2663 * period. Throttle as specified by rdp->blimit.
2664 */
37c72e56 2665static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
64db4cff
PM
2666{
2667 unsigned long flags;
2668 struct rcu_head *next, *list, **tail;
878d7439
ED
2669 long bl, count, count_lazy;
2670 int i;
64db4cff 2671
dc35c893 2672 /* If no callbacks are ready, just return. */
29c00b4a 2673 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
486e2593 2674 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
7d0ae808 2675 trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
4968c300
PM
2676 need_resched(), is_idle_task(current),
2677 rcu_is_callbacks_kthread());
64db4cff 2678 return;
29c00b4a 2679 }
64db4cff
PM
2680
2681 /*
2682 * Extract the list of ready callbacks, disabling to prevent
2683 * races with call_rcu() from interrupt handlers.
2684 */
2685 local_irq_save(flags);
8146c4e2 2686 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
29c00b4a 2687 bl = rdp->blimit;
486e2593 2688 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
64db4cff
PM
2689 list = rdp->nxtlist;
2690 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2691 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
2692 tail = rdp->nxttail[RCU_DONE_TAIL];
b41772ab
PM
2693 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2694 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2695 rdp->nxttail[i] = &rdp->nxtlist;
64db4cff
PM
2696 local_irq_restore(flags);
2697
2698 /* Invoke callbacks. */
486e2593 2699 count = count_lazy = 0;
64db4cff
PM
2700 while (list) {
2701 next = list->next;
2702 prefetch(next);
551d55a9 2703 debug_rcu_head_unqueue(list);
486e2593
PM
2704 if (__rcu_reclaim(rsp->name, list))
2705 count_lazy++;
64db4cff 2706 list = next;
dff1672d
PM
2707 /* Stop only if limit reached and CPU has something to do. */
2708 if (++count >= bl &&
2709 (need_resched() ||
2710 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
64db4cff
PM
2711 break;
2712 }
2713
2714 local_irq_save(flags);
4968c300
PM
2715 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2716 is_idle_task(current),
2717 rcu_is_callbacks_kthread());
64db4cff
PM
2718
2719 /* Update count, and requeue any remaining callbacks. */
64db4cff
PM
2720 if (list != NULL) {
2721 *tail = rdp->nxtlist;
2722 rdp->nxtlist = list;
b41772ab
PM
2723 for (i = 0; i < RCU_NEXT_SIZE; i++)
2724 if (&rdp->nxtlist == rdp->nxttail[i])
2725 rdp->nxttail[i] = tail;
64db4cff
PM
2726 else
2727 break;
2728 }
b1420f1c
PM
2729 smp_mb(); /* List handling before counting for rcu_barrier(). */
2730 rdp->qlen_lazy -= count_lazy;
7d0ae808 2731 WRITE_ONCE(rdp->qlen, rdp->qlen - count);
b1420f1c 2732 rdp->n_cbs_invoked += count;
64db4cff
PM
2733
2734 /* Reinstate batch limit if we have worked down the excess. */
2735 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2736 rdp->blimit = blimit;
2737
37c72e56
PM
2738 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2739 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2740 rdp->qlen_last_fqs_check = 0;
2741 rdp->n_force_qs_snap = rsp->n_force_qs;
2742 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2743 rdp->qlen_last_fqs_check = rdp->qlen;
cfca9279 2744 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
37c72e56 2745
64db4cff
PM
2746 local_irq_restore(flags);
2747
e0f23060 2748 /* Re-invoke RCU core processing if there are callbacks remaining. */
64db4cff 2749 if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899 2750 invoke_rcu_core();
64db4cff
PM
2751}
2752
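/*
 * Editor's note (illustrative, not in the original file): rcu_do_batch()
 * above invokes at most rdp->blimit callbacks per pass unless the CPU has
 * nothing better to do, and once a large backlog (blimit == LONG_MAX) has
 * been worked down to qlowmark it drops the limit back to the "blimit"
 * default.  Any leftover callbacks are requeued and a further RCU_SOFTIRQ
 * pass is requested via invoke_rcu_core().
 */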
2753/*
2754 * Check to see if this CPU is in a non-context-switch quiescent state
2755 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
e0f23060 2756 * Also schedule RCU core processing.
64db4cff 2757 *
9b2e4f18 2758 * This function must be called from hardirq context. It is normally
64db4cff
PM
2759 * invoked from the scheduling-clock interrupt. If rcu_pending returns
2760 * false, there is no point in invoking rcu_check_callbacks().
2761 */
c3377c2d 2762void rcu_check_callbacks(int user)
64db4cff 2763{
f7f7bac9 2764 trace_rcu_utilization(TPS("Start scheduler-tick"));
a858af28 2765 increment_cpu_stall_ticks();
9b2e4f18 2766 if (user || rcu_is_cpu_rrupt_from_idle()) {
64db4cff
PM
2767
2768 /*
2769 * Get here if this CPU took its interrupt from user
2770 * mode or from the idle loop, and if this is not a
2771 * nested interrupt. In this case, the CPU is in
d6714c22 2772 * a quiescent state, so note it.
64db4cff
PM
2773 *
2774 * No memory barrier is required here because both
d6714c22
PM
2775 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2776 * variables that other CPUs neither access nor modify,
2777 * at least not while the corresponding CPU is online.
64db4cff
PM
2778 */
2779
284a8c93
PM
2780 rcu_sched_qs();
2781 rcu_bh_qs();
64db4cff
PM
2782
2783 } else if (!in_softirq()) {
2784
2785 /*
2786 * Get here if this CPU did not take its interrupt from
2787 * softirq, in other words, if it is not interrupting
2788 * a rcu_bh read-side critical section. This is an _bh
d6714c22 2789 * critical section, so note it.
64db4cff
PM
2790 */
2791
284a8c93 2792 rcu_bh_qs();
64db4cff 2793 }
86aea0e6 2794 rcu_preempt_check_callbacks();
e3950ecd 2795 if (rcu_pending())
a46e0899 2796 invoke_rcu_core();
8315f422
PM
2797 if (user)
2798 rcu_note_voluntary_context_switch(current);
f7f7bac9 2799 trace_rcu_utilization(TPS("End scheduler-tick"));
64db4cff
PM
2800}
2801
64db4cff
PM
2802/*
2803 * Scan the leaf rcu_node structures, processing dyntick state for any that
2804 * have not yet encountered a quiescent state, using the function specified.
27f4d280
PM
2805 * Also initiate boosting for any threads blocked on the root rcu_node.
2806 *
ee47eb9f 2807 * The caller must have suppressed start of new grace periods.
64db4cff 2808 */
217af2a2
PM
2809static void force_qs_rnp(struct rcu_state *rsp,
2810 int (*f)(struct rcu_data *rsp, bool *isidle,
2811 unsigned long *maxj),
2812 bool *isidle, unsigned long *maxj)
64db4cff
PM
2813{
2814 unsigned long bit;
2815 int cpu;
2816 unsigned long flags;
2817 unsigned long mask;
a0b6c9a7 2818 struct rcu_node *rnp;
64db4cff 2819
a0b6c9a7 2820 rcu_for_each_leaf_node(rsp, rnp) {
bde6c3aa 2821 cond_resched_rcu_qs();
64db4cff 2822 mask = 0;
2a67e741 2823 raw_spin_lock_irqsave_rcu_node(rnp, flags);
a0b6c9a7 2824 if (rnp->qsmask == 0) {
a77da14c
PM
2825 if (rcu_state_p == &rcu_sched_state ||
2826 rsp != rcu_state_p ||
2827 rcu_preempt_blocked_readers_cgp(rnp)) {
2828 /*
2829 * No point in scanning bits because they
2830 * are all zero. But we might need to
2831 * priority-boost blocked readers.
2832 */
2833 rcu_initiate_boost(rnp, flags);
2834 /* rcu_initiate_boost() releases rnp->lock */
2835 continue;
2836 }
2837 if (rnp->parent &&
2838 (rnp->parent->qsmask & rnp->grpmask)) {
2839 /*
2840 * Race between grace-period
2841 * initialization and task exiting RCU
2842 * read-side critical section: Report.
2843 */
2844 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
 2845				/* rcu_report_unblock_qs_rnp() releases ->lock */
2846 continue;
2847 }
64db4cff 2848 }
a0b6c9a7 2849 cpu = rnp->grplo;
64db4cff 2850 bit = 1;
a0b6c9a7 2851 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
0edd1b17 2852 if ((rnp->qsmask & bit) != 0) {
0edd1b17
PM
2853 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2854 mask |= bit;
2855 }
64db4cff 2856 }
45f014c5 2857 if (mask != 0) {
654e9533
PM
 2858			/* Idle/offline CPUs, report (releases rnp->lock). */
2859 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
0aa04b05
PM
2860 } else {
2861 /* Nothing to do here, so just drop the lock. */
2862 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff 2863 }
64db4cff 2864 }
64db4cff
PM
2865}
2866
2867/*
2868 * Force quiescent states on reluctant CPUs, and also detect which
2869 * CPUs are in dyntick-idle mode.
2870 */
4cdfc175 2871static void force_quiescent_state(struct rcu_state *rsp)
64db4cff
PM
2872{
2873 unsigned long flags;
394f2769
PM
2874 bool ret;
2875 struct rcu_node *rnp;
2876 struct rcu_node *rnp_old = NULL;
2877
2878 /* Funnel through hierarchy to reduce memory contention. */
d860d403 2879 rnp = __this_cpu_read(rsp->rda->mynode);
394f2769 2880 for (; rnp != NULL; rnp = rnp->parent) {
7d0ae808 2881 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
394f2769
PM
2882 !raw_spin_trylock(&rnp->fqslock);
2883 if (rnp_old != NULL)
2884 raw_spin_unlock(&rnp_old->fqslock);
2885 if (ret) {
a792563b 2886 rsp->n_force_qs_lh++;
394f2769
PM
2887 return;
2888 }
2889 rnp_old = rnp;
2890 }
2891 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
64db4cff 2892
394f2769 2893 /* Reached the root of the rcu_node tree, acquire lock. */
2a67e741 2894 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
394f2769 2895 raw_spin_unlock(&rnp_old->fqslock);
7d0ae808 2896 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
a792563b 2897 rsp->n_force_qs_lh++;
394f2769 2898 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
4cdfc175 2899 return; /* Someone beat us to it. */
46a1e34e 2900 }
7d0ae808 2901 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
394f2769 2902 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2aa792e6 2903 rcu_gp_kthread_wake(rsp);
64db4cff
PM
2904}
2905
64db4cff 2906/*
e0f23060
PM
2907 * This does the RCU core processing work for the specified rcu_state
2908 * and rcu_data structures. This may be called only from the CPU to
 2909 * which the rdp belongs.
64db4cff
PM
2910 */
2911static void
1bca8cf1 2912__rcu_process_callbacks(struct rcu_state *rsp)
64db4cff
PM
2913{
2914 unsigned long flags;
48a7639c 2915 bool needwake;
fa07a58f 2916 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
64db4cff 2917
2e597558
PM
2918 WARN_ON_ONCE(rdp->beenonline == 0);
2919
64db4cff
PM
2920 /* Update RCU state based on any recent quiescent states. */
2921 rcu_check_quiescent_state(rsp, rdp);
2922
2923 /* Does this CPU require a not-yet-started grace period? */
dc35c893 2924 local_irq_save(flags);
64db4cff 2925 if (cpu_needs_another_gp(rsp, rdp)) {
6cf10081 2926 raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
48a7639c 2927 needwake = rcu_start_gp(rsp);
b8462084 2928 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
48a7639c
PM
2929 if (needwake)
2930 rcu_gp_kthread_wake(rsp);
dc35c893
PM
2931 } else {
2932 local_irq_restore(flags);
64db4cff
PM
2933 }
2934
2935 /* If there are callbacks ready, invoke them. */
09223371 2936 if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899 2937 invoke_rcu_callbacks(rsp, rdp);
96d3fd0d
PM
2938
2939 /* Do any needed deferred wakeups of rcuo kthreads. */
2940 do_nocb_deferred_wakeup(rdp);
09223371
SL
2941}
2942
64db4cff 2943/*
e0f23060 2944 * Do RCU core processing for the current CPU.
64db4cff 2945 */
09223371 2946static void rcu_process_callbacks(struct softirq_action *unused)
64db4cff 2947{
6ce75a23
PM
2948 struct rcu_state *rsp;
2949
bfa00b4c
PM
2950 if (cpu_is_offline(smp_processor_id()))
2951 return;
f7f7bac9 2952 trace_rcu_utilization(TPS("Start RCU core"));
6ce75a23
PM
2953 for_each_rcu_flavor(rsp)
2954 __rcu_process_callbacks(rsp);
f7f7bac9 2955 trace_rcu_utilization(TPS("End RCU core"));
64db4cff
PM
2956}
2957
a26ac245 2958/*
e0f23060
PM
2959 * Schedule RCU callback invocation. If the specified type of RCU
2960 * does not support RCU priority boosting, just do a direct call,
2961 * otherwise wake up the per-CPU kernel kthread. Note that because we
924df8a0 2962 * are running on the current CPU with softirqs disabled, the
e0f23060 2963 * rcu_cpu_kthread_task cannot disappear out from under us.
a26ac245 2964 */
a46e0899 2965static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
a26ac245 2966{
7d0ae808 2967 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
b0d30417 2968 return;
a46e0899
PM
2969 if (likely(!rsp->boost)) {
2970 rcu_do_batch(rsp, rdp);
a26ac245
PM
2971 return;
2972 }
a46e0899 2973 invoke_rcu_callbacks_kthread();
a26ac245
PM
2974}
2975
a46e0899 2976static void invoke_rcu_core(void)
09223371 2977{
b0f74036
PM
2978 if (cpu_online(smp_processor_id()))
2979 raise_softirq(RCU_SOFTIRQ);
09223371
SL
2980}
2981
29154c57
PM
2982/*
2983 * Handle any core-RCU processing required by a call_rcu() invocation.
2984 */
2985static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2986 struct rcu_head *head, unsigned long flags)
64db4cff 2987{
48a7639c
PM
2988 bool needwake;
2989
62fde6ed
PM
2990 /*
2991 * If called from an extended quiescent state, invoke the RCU
2992 * core in order to force a re-evaluation of RCU's idleness.
2993 */
9910affa 2994 if (!rcu_is_watching())
62fde6ed
PM
2995 invoke_rcu_core();
2996
a16b7a69 2997 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
29154c57 2998 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2655d57e 2999 return;
64db4cff 3000
37c72e56
PM
3001 /*
3002 * Force the grace period if too many callbacks or too long waiting.
3003 * Enforce hysteresis, and don't invoke force_quiescent_state()
3004 * if some other CPU has recently done so. Also, don't bother
3005 * invoking force_quiescent_state() if the newly enqueued callback
3006 * is the only one waiting for a grace period to complete.
3007 */
2655d57e 3008 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
b52573d2
PM
3009
3010 /* Are we ignoring a completed grace period? */
470716fc 3011 note_gp_changes(rsp, rdp);
b52573d2
PM
3012
3013 /* Start a new grace period if one not already started. */
3014 if (!rcu_gp_in_progress(rsp)) {
b52573d2
PM
3015 struct rcu_node *rnp_root = rcu_get_root(rsp);
3016
2a67e741 3017 raw_spin_lock_rcu_node(rnp_root);
48a7639c 3018 needwake = rcu_start_gp(rsp);
b8462084 3019 raw_spin_unlock(&rnp_root->lock);
48a7639c
PM
3020 if (needwake)
3021 rcu_gp_kthread_wake(rsp);
b52573d2
PM
3022 } else {
3023 /* Give the grace period a kick. */
3024 rdp->blimit = LONG_MAX;
3025 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
3026 *rdp->nxttail[RCU_DONE_TAIL] != head)
4cdfc175 3027 force_quiescent_state(rsp);
b52573d2
PM
3028 rdp->n_force_qs_snap = rsp->n_force_qs;
3029 rdp->qlen_last_fqs_check = rdp->qlen;
3030 }
4cdfc175 3031 }
29154c57
PM
3032}
3033
ae150184
PM
3034/*
3035 * RCU callback function to leak a callback.
3036 */
3037static void rcu_leak_callback(struct rcu_head *rhp)
3038{
3039}
3040
3fbfbf7a
PM
3041/*
3042 * Helper function for call_rcu() and friends. The cpu argument will
3043 * normally be -1, indicating "currently running CPU". It may specify
3044 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
3045 * is expected to specify a CPU.
3046 */
64db4cff 3047static void
b6a4ae76 3048__call_rcu(struct rcu_head *head, rcu_callback_t func,
3fbfbf7a 3049 struct rcu_state *rsp, int cpu, bool lazy)
64db4cff
PM
3050{
3051 unsigned long flags;
3052 struct rcu_data *rdp;
3053
1146edcb 3054 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
ae150184
PM
3055 if (debug_rcu_head_queue(head)) {
3056 /* Probable double call_rcu(), so leak the callback. */
7d0ae808 3057 WRITE_ONCE(head->func, rcu_leak_callback);
ae150184
PM
3058 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
3059 return;
3060 }
64db4cff
PM
3061 head->func = func;
3062 head->next = NULL;
3063
64db4cff
PM
3064 /*
3065 * Opportunistically note grace-period endings and beginnings.
3066 * Note that we might see a beginning right after we see an
3067 * end, but never vice versa, since this CPU has to pass through
3068 * a quiescent state betweentimes.
3069 */
3070 local_irq_save(flags);
394f99a9 3071 rdp = this_cpu_ptr(rsp->rda);
64db4cff
PM
3072
3073 /* Add the callback to our list. */
3fbfbf7a
PM
3074 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
3075 int offline;
3076
3077 if (cpu != -1)
3078 rdp = per_cpu_ptr(rsp->rda, cpu);
143da9c2
PM
3079 if (likely(rdp->mynode)) {
3080 /* Post-boot, so this should be for a no-CBs CPU. */
3081 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3082 WARN_ON_ONCE(offline);
3083 /* Offline CPU, _call_rcu() illegal, leak callback. */
3084 local_irq_restore(flags);
3085 return;
3086 }
3087 /*
3088 * Very early boot, before rcu_init(). Initialize if needed
3089 * and then drop through to queue the callback.
3090 */
3091 BUG_ON(cpu != -1);
34404ca8 3092 WARN_ON_ONCE(!rcu_is_watching());
143da9c2
PM
3093 if (!likely(rdp->nxtlist))
3094 init_default_callback_list(rdp);
0d8ee37e 3095 }
7d0ae808 3096 WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
486e2593
PM
3097 if (lazy)
3098 rdp->qlen_lazy++;
c57afe80
PM
3099 else
3100 rcu_idle_count_callbacks_posted();
b1420f1c
PM
3101 smp_mb(); /* Count before adding callback for rcu_barrier(). */
3102 *rdp->nxttail[RCU_NEXT_TAIL] = head;
3103 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2655d57e 3104
d4c08f2a
PM
3105 if (__is_kfree_rcu_offset((unsigned long)func))
3106 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
486e2593 3107 rdp->qlen_lazy, rdp->qlen);
d4c08f2a 3108 else
486e2593 3109 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
d4c08f2a 3110
29154c57
PM
3111 /* Go handle any RCU core processing required. */
3112 __call_rcu_core(rsp, rdp, head, flags);
64db4cff
PM
3113 local_irq_restore(flags);
3114}
3115
3116/*
d6714c22 3117 * Queue an RCU-sched callback for invocation after a grace period.
64db4cff 3118 */
b6a4ae76 3119void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
64db4cff 3120{
3fbfbf7a 3121 __call_rcu(head, func, &rcu_sched_state, -1, 0);
64db4cff 3122}
d6714c22 3123EXPORT_SYMBOL_GPL(call_rcu_sched);
64db4cff
PM
3124
3125/*
486e2593 3126 * Queue an RCU callback for invocation after a quicker grace period.
64db4cff 3127 */
b6a4ae76 3128void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
64db4cff 3129{
3fbfbf7a 3130 __call_rcu(head, func, &rcu_bh_state, -1, 0);
64db4cff
PM
3131}
3132EXPORT_SYMBOL_GPL(call_rcu_bh);
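As a usage sketch (the struct and helper names below are illustrative, not from this file): an update-side caller embeds an rcu_head in its own object, queues it with call_rcu_sched(), and the callback recovers the enclosing object with container_of() once a grace period has elapsed. call_rcu_bh() is used the same way when the read side is protected by rcu_read_lock_bh().

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object freed after an RCU-sched grace period. */
struct foo {
	int key;
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);		/* All pre-existing readers have finished. */
}

static void foo_retire(struct foo *fp)
{
	/* Unpublish fp from the reader-visible structure first (not shown). */
	call_rcu_sched(&fp->rh, foo_reclaim);
}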
3133
495aa969
ACB
3134/*
3135 * Queue an RCU callback for lazy invocation after a grace period.
3136 * This will likely be later named something like "call_rcu_lazy()",
3137 * but this change will require some way of tagging the lazy RCU
3138 * callbacks in the list of pending callbacks. Until then, this
3139 * function may only be called from __kfree_rcu().
3140 */
3141void kfree_call_rcu(struct rcu_head *head,
b6a4ae76 3142 rcu_callback_t func)
495aa969 3143{
e534165b 3144 __call_rcu(head, func, rcu_state_p, -1, 1);
495aa969
ACB
3145}
3146EXPORT_SYMBOL_GPL(kfree_call_rcu);
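In practice kfree_call_rcu() is reached through the kfree_rcu() wrapper rather than called directly. A minimal sketch, with a hypothetical structure and field name:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	long payload;
	struct rcu_head rh;	/* Used by kfree_rcu() to queue the deferred free. */
};

static void bar_retire(struct bar *bp)
{
	/*
	 * Expands to __kfree_rcu(), which ends up here with lazy == 1:
	 * the callback only frees memory, so it may be deferred longer.
	 */
	kfree_rcu(bp, rh);
}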
3147
6d813391
PM
3148/*
3149 * Because a context switch is a grace period for RCU-sched and RCU-bh,
3150 * any blocking grace-period wait automatically implies a grace period
 3151 * if there is only one CPU online at any point in time during execution
3152 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
3153 * occasionally incorrectly indicate that there are multiple CPUs online
3154 * when there was in fact only one the whole time, as this just adds
3155 * some overhead: RCU still operates correctly.
6d813391
PM
3156 */
3157static inline int rcu_blocking_is_gp(void)
3158{
95f0c1de
PM
3159 int ret;
3160
6d813391 3161 might_sleep(); /* Check for RCU read-side critical section. */
95f0c1de
PM
3162 preempt_disable();
3163 ret = num_online_cpus() <= 1;
3164 preempt_enable();
3165 return ret;
6d813391
PM
3166}
3167
6ebb237b
PM
3168/**
3169 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
3170 *
3171 * Control will return to the caller some time after a full rcu-sched
3172 * grace period has elapsed, in other words after all currently executing
3173 * rcu-sched read-side critical sections have completed. These read-side
3174 * critical sections are delimited by rcu_read_lock_sched() and
3175 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
3176 * local_irq_disable(), and so on may be used in place of
3177 * rcu_read_lock_sched().
3178 *
3179 * This means that all preempt_disable code sequences, including NMI and
f0a0e6f2
PM
3180 * non-threaded hardware-interrupt handlers, in progress on entry will
3181 * have completed before this primitive returns. However, this does not
3182 * guarantee that softirq handlers will have completed, since in some
3183 * kernels, these handlers can run in process context, and can block.
3184 *
3185 * Note that this guarantee implies further memory-ordering guarantees.
3186 * On systems with more than one CPU, when synchronize_sched() returns,
3187 * each CPU is guaranteed to have executed a full memory barrier since the
3188 * end of its last RCU-sched read-side critical section whose beginning
3189 * preceded the call to synchronize_sched(). In addition, each CPU having
3190 * an RCU read-side critical section that extends beyond the return from
3191 * synchronize_sched() is guaranteed to have executed a full memory barrier
3192 * after the beginning of synchronize_sched() and before the beginning of
3193 * that RCU read-side critical section. Note that these guarantees include
3194 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3195 * that are executing in the kernel.
3196 *
3197 * Furthermore, if CPU A invoked synchronize_sched(), which returned
3198 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3199 * to have executed a full memory barrier during the execution of
3200 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
3201 * again only if the system has more than one CPU).
6ebb237b
PM
3202 *
3203 * This primitive provides the guarantees made by the (now removed)
3204 * synchronize_kernel() API. In contrast, synchronize_rcu() only
3205 * guarantees that rcu_read_lock() sections will have completed.
3206 * In "classic RCU", these two guarantees happen to be one and
3207 * the same, but can differ in realtime RCU implementations.
3208 */
3209void synchronize_sched(void)
3210{
f78f5b90
PM
3211 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3212 lock_is_held(&rcu_lock_map) ||
3213 lock_is_held(&rcu_sched_lock_map),
3214 "Illegal synchronize_sched() in RCU-sched read-side critical section");
6ebb237b
PM
3215 if (rcu_blocking_is_gp())
3216 return;
5afff48b 3217 if (rcu_gp_is_expedited())
3705b88d
AM
3218 synchronize_sched_expedited();
3219 else
3220 wait_rcu_gp(call_rcu_sched);
6ebb237b
PM
3221}
3222EXPORT_SYMBOL_GPL(synchronize_sched);
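A sketch of the classic update-side pattern the kernel-doc above describes; 'struct foo', global_foo, and foo_mutex are hypothetical names, and readers are assumed to traverse global_foo under rcu_read_lock_sched() (or with preemption disabled):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
};

static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_mutex);

static void foo_replace(struct foo *newp)
{
	struct foo *oldp;

	mutex_lock(&foo_mutex);
	oldp = rcu_dereference_protected(global_foo,
					 lockdep_is_held(&foo_mutex));
	rcu_assign_pointer(global_foo, newp);	/* Publish the replacement. */
	mutex_unlock(&foo_mutex);

	synchronize_sched();	/* All pre-existing readers are now done. */
	kfree(oldp);		/* Safe to reclaim the old version. */
}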
3223
3224/**
3225 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
3226 *
3227 * Control will return to the caller some time after a full rcu_bh grace
3228 * period has elapsed, in other words after all currently executing rcu_bh
3229 * read-side critical sections have completed. RCU read-side critical
3230 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
3231 * and may be nested.
f0a0e6f2
PM
3232 *
3233 * See the description of synchronize_sched() for more detailed information
3234 * on memory ordering guarantees.
6ebb237b
PM
3235 */
3236void synchronize_rcu_bh(void)
3237{
f78f5b90
PM
3238 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3239 lock_is_held(&rcu_lock_map) ||
3240 lock_is_held(&rcu_sched_lock_map),
3241 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
6ebb237b
PM
3242 if (rcu_blocking_is_gp())
3243 return;
5afff48b 3244 if (rcu_gp_is_expedited())
3705b88d
AM
3245 synchronize_rcu_bh_expedited();
3246 else
3247 wait_rcu_gp(call_rcu_bh);
6ebb237b
PM
3248}
3249EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3250
765a3f4f
PM
3251/**
3252 * get_state_synchronize_rcu - Snapshot current RCU state
3253 *
3254 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3255 * to determine whether or not a full grace period has elapsed in the
3256 * meantime.
3257 */
3258unsigned long get_state_synchronize_rcu(void)
3259{
3260 /*
3261 * Any prior manipulation of RCU-protected data must happen
3262 * before the load from ->gpnum.
3263 */
3264 smp_mb(); /* ^^^ */
3265
3266 /*
3267 * Make sure this load happens before the purportedly
3268 * time-consuming work between get_state_synchronize_rcu()
3269 * and cond_synchronize_rcu().
3270 */
e534165b 3271 return smp_load_acquire(&rcu_state_p->gpnum);
765a3f4f
PM
3272}
3273EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3274
3275/**
3276 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3277 *
3278 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3279 *
3280 * If a full RCU grace period has elapsed since the earlier call to
3281 * get_state_synchronize_rcu(), just return. Otherwise, invoke
3282 * synchronize_rcu() to wait for a full grace period.
3283 *
3284 * Yes, this function does not take counter wrap into account. But
3285 * counter wrap is harmless. If the counter wraps, we have waited for
3286 * more than 2 billion grace periods (and way more on a 64-bit system!),
3287 * so waiting for one additional grace period should be just fine.
3288 */
3289void cond_synchronize_rcu(unsigned long oldstate)
3290{
3291 unsigned long newstate;
3292
3293 /*
3294 * Ensure that this load happens before any RCU-destructive
3295 * actions the caller might carry out after we return.
3296 */
e534165b 3297 newstate = smp_load_acquire(&rcu_state_p->completed);
765a3f4f
PM
3298 if (ULONG_CMP_GE(oldstate, newstate))
3299 synchronize_rcu();
3300}
3301EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
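A sketch of the intended usage (do_expensive_setup() is a hypothetical stand-in for unrelated work): snapshot the grace-period state, overlap other processing with the grace period, then wait only if a full grace period has not already elapsed in the meantime.

extern void do_expensive_setup(void);	/* Hypothetical: work overlapped with the GP. */

static void foo_reclaim_deferred(void *oldp)
{
	unsigned long gp_cookie;

	gp_cookie = get_state_synchronize_rcu();

	do_expensive_setup();

	cond_synchronize_rcu(gp_cookie);	/* No-op if a full GP already elapsed. */
	kfree(oldp);
}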
3302
24560056
PM
3303/**
3304 * get_state_synchronize_sched - Snapshot current RCU-sched state
3305 *
3306 * Returns a cookie that is used by a later call to cond_synchronize_sched()
3307 * to determine whether or not a full grace period has elapsed in the
3308 * meantime.
3309 */
3310unsigned long get_state_synchronize_sched(void)
3311{
3312 /*
3313 * Any prior manipulation of RCU-protected data must happen
3314 * before the load from ->gpnum.
3315 */
3316 smp_mb(); /* ^^^ */
3317
3318 /*
3319 * Make sure this load happens before the purportedly
3320 * time-consuming work between get_state_synchronize_sched()
3321 * and cond_synchronize_sched().
3322 */
3323 return smp_load_acquire(&rcu_sched_state.gpnum);
3324}
3325EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3326
3327/**
3328 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
3329 *
3330 * @oldstate: return value from earlier call to get_state_synchronize_sched()
3331 *
3332 * If a full RCU-sched grace period has elapsed since the earlier call to
3333 * get_state_synchronize_sched(), just return. Otherwise, invoke
3334 * synchronize_sched() to wait for a full grace period.
3335 *
3336 * Yes, this function does not take counter wrap into account. But
3337 * counter wrap is harmless. If the counter wraps, we have waited for
3338 * more than 2 billion grace periods (and way more on a 64-bit system!),
3339 * so waiting for one additional grace period should be just fine.
3340 */
3341void cond_synchronize_sched(unsigned long oldstate)
3342{
3343 unsigned long newstate;
3344
3345 /*
3346 * Ensure that this load happens before any RCU-destructive
3347 * actions the caller might carry out after we return.
3348 */
3349 newstate = smp_load_acquire(&rcu_sched_state.completed);
3350 if (ULONG_CMP_GE(oldstate, newstate))
3351 synchronize_sched();
3352}
3353EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3354
28f00767
PM
3355/* Adjust sequence number for start of update-side operation. */
3356static void rcu_seq_start(unsigned long *sp)
3357{
3358 WRITE_ONCE(*sp, *sp + 1);
3359 smp_mb(); /* Ensure update-side operation after counter increment. */
3360 WARN_ON_ONCE(!(*sp & 0x1));
3361}
3362
3363/* Adjust sequence number for end of update-side operation. */
3364static void rcu_seq_end(unsigned long *sp)
3365{
3366 smp_mb(); /* Ensure update-side operation before counter increment. */
3367 WRITE_ONCE(*sp, *sp + 1);
3368 WARN_ON_ONCE(*sp & 0x1);
3369}
3370
3371/* Take a snapshot of the update side's sequence number. */
3372static unsigned long rcu_seq_snap(unsigned long *sp)
3373{
3374 unsigned long s;
3375
28f00767
PM
3376 s = (READ_ONCE(*sp) + 3) & ~0x1;
3377 smp_mb(); /* Above access must not bleed into critical section. */
3378 return s;
3379}
3380
3381/*
3382 * Given a snapshot from rcu_seq_snap(), determine whether or not a
3383 * full update-side operation has occurred.
3384 */
3385static bool rcu_seq_done(unsigned long *sp, unsigned long s)
3386{
3387 return ULONG_CMP_GE(READ_ONCE(*sp), s);
3388}
3389
3390/* Wrapper functions for expedited grace periods. */
3391static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
3392{
3393 rcu_seq_start(&rsp->expedited_sequence);
3394}
3395static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
3396{
3397 rcu_seq_end(&rsp->expedited_sequence);
704dd435 3398 smp_mb(); /* Ensure that consecutive grace periods serialize. */
28f00767
PM
3399}
3400static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
3401{
886ef5a1 3402 smp_mb(); /* Caller's modifications seen first by other CPUs. */
28f00767
PM
3403 return rcu_seq_snap(&rsp->expedited_sequence);
3404}
3405static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
3406{
3407 return rcu_seq_done(&rsp->expedited_sequence, s);
3408}
3409
b9585e94
PM
3410/*
3411 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
3412 * recent CPU-online activity. Note that these masks are not cleared
3413 * when CPUs go offline, so they reflect the union of all CPUs that have
3414 * ever been online. This means that this function normally takes its
3415 * no-work-to-do fastpath.
3416 */
3417static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
3418{
3419 bool done;
3420 unsigned long flags;
3421 unsigned long mask;
3422 unsigned long oldmask;
3423 int ncpus = READ_ONCE(rsp->ncpus);
3424 struct rcu_node *rnp;
3425 struct rcu_node *rnp_up;
3426
3427 /* If no new CPUs onlined since last time, nothing to do. */
3428 if (likely(ncpus == rsp->ncpus_snap))
3429 return;
3430 rsp->ncpus_snap = ncpus;
3431
3432 /*
3433 * Each pass through the following loop propagates newly onlined
3434 * CPUs for the current rcu_node structure up the rcu_node tree.
3435 */
3436 rcu_for_each_leaf_node(rsp, rnp) {
2a67e741 3437 raw_spin_lock_irqsave_rcu_node(rnp, flags);
b9585e94
PM
3438 if (rnp->expmaskinit == rnp->expmaskinitnext) {
3439 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3440 continue; /* No new CPUs, nothing to do. */
3441 }
3442
3443 /* Update this node's mask, track old value for propagation. */
3444 oldmask = rnp->expmaskinit;
3445 rnp->expmaskinit = rnp->expmaskinitnext;
3446 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3447
3448 /* If was already nonzero, nothing to propagate. */
3449 if (oldmask)
3450 continue;
3451
3452 /* Propagate the new CPU up the tree. */
3453 mask = rnp->grpmask;
3454 rnp_up = rnp->parent;
3455 done = false;
3456 while (rnp_up) {
2a67e741 3457 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
b9585e94
PM
3458 if (rnp_up->expmaskinit)
3459 done = true;
3460 rnp_up->expmaskinit |= mask;
3461 raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
3462 if (done)
3463 break;
3464 mask = rnp_up->grpmask;
3465 rnp_up = rnp_up->parent;
3466 }
3467 }
3468}
3469
3470/*
3471 * Reset the ->expmask values in the rcu_node tree in preparation for
3472 * a new expedited grace period.
3473 */
3474static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
3475{
3476 unsigned long flags;
3477 struct rcu_node *rnp;
3478
3479 sync_exp_reset_tree_hotplug(rsp);
3480 rcu_for_each_node_breadth_first(rsp, rnp) {
2a67e741 3481 raw_spin_lock_irqsave_rcu_node(rnp, flags);
b9585e94
PM
3482 WARN_ON_ONCE(rnp->expmask);
3483 rnp->expmask = rnp->expmaskinit;
3484 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3485 }
3486}
3487
7922cd0e 3488/*
8203d6d0 3489 * Return non-zero if there is no RCU expedited grace period in progress
7922cd0e
PM
3490 * for the specified rcu_node structure, in other words, if all CPUs and
3491 * tasks covered by the specified rcu_node structure have done their bit
3492 * for the current expedited grace period. Works only for preemptible
 3493 * RCU -- other RCU implementations use other means.
3494 *
3495 * Caller must hold the root rcu_node's exp_funnel_mutex.
3496 */
3497static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
3498{
8203d6d0 3499 return rnp->exp_tasks == NULL &&
7922cd0e
PM
3500 READ_ONCE(rnp->expmask) == 0;
3501}
3502
3503/*
3504 * Report the exit from RCU read-side critical section for the last task
3505 * that queued itself during or before the current expedited preemptible-RCU
3506 * grace period. This event is reported either to the rcu_node structure on
3507 * which the task was queued or to one of that rcu_node structure's ancestors,
3508 * recursively up the tree. (Calm down, calm down, we do the recursion
3509 * iteratively!)
3510 *
8203d6d0
PM
3511 * Caller must hold the root rcu_node's exp_funnel_mutex and the
3512 * specified rcu_node structure's ->lock.
7922cd0e 3513 */
8203d6d0
PM
3514static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
3515 bool wake, unsigned long flags)
3516 __releases(rnp->lock)
7922cd0e 3517{
7922cd0e
PM
3518 unsigned long mask;
3519
7922cd0e
PM
3520 for (;;) {
3521 if (!sync_rcu_preempt_exp_done(rnp)) {
8203d6d0
PM
3522 if (!rnp->expmask)
3523 rcu_initiate_boost(rnp, flags);
3524 else
3525 raw_spin_unlock_irqrestore(&rnp->lock, flags);
7922cd0e
PM
3526 break;
3527 }
3528 if (rnp->parent == NULL) {
3529 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3530 if (wake) {
3531 smp_mb(); /* EGP done before wake_up(). */
3532 wake_up(&rsp->expedited_wq);
3533 }
3534 break;
3535 }
3536 mask = rnp->grpmask;
3537 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
3538 rnp = rnp->parent;
2a67e741 3539 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
8203d6d0 3540 WARN_ON_ONCE(!(rnp->expmask & mask));
7922cd0e
PM
3541 rnp->expmask &= ~mask;
3542 }
3543}
3544
8203d6d0
PM
3545/*
3546 * Report expedited quiescent state for specified node. This is a
3547 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
3548 *
3549 * Caller must hold the root rcu_node's exp_funnel_mutex.
3550 */
3551static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
3552 struct rcu_node *rnp, bool wake)
3553{
3554 unsigned long flags;
3555
2a67e741 3556 raw_spin_lock_irqsave_rcu_node(rnp, flags);
8203d6d0
PM
3557 __rcu_report_exp_rnp(rsp, rnp, wake, flags);
3558}
3559
3560/*
3561 * Report expedited quiescent state for multiple CPUs, all covered by the
3562 * specified leaf rcu_node structure. Caller must hold the root
3563 * rcu_node's exp_funnel_mutex.
3564 */
3565static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
3566 unsigned long mask, bool wake)
3567{
3568 unsigned long flags;
3569
2a67e741 3570 raw_spin_lock_irqsave_rcu_node(rnp, flags);
338b0f76
PM
3571 if (!(rnp->expmask & mask)) {
3572 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3573 return;
3574 }
8203d6d0
PM
3575 rnp->expmask &= ~mask;
3576 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
3577}
3578
3579/*
3580 * Report expedited quiescent state for specified rcu_data (CPU).
3581 * Caller must hold the root rcu_node's exp_funnel_mutex.
3582 */
6587a23b
PM
3583static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
3584 bool wake)
8203d6d0
PM
3585{
3586 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
3587}
3588
29fd9309
PM
3589/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
3590static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
2cd6ffaf 3591 struct rcu_data *rdp,
29fd9309 3592 atomic_long_t *stat, unsigned long s)
3d3b7db0 3593{
28f00767 3594 if (rcu_exp_gp_seq_done(rsp, s)) {
385b73c0
PM
3595 if (rnp)
3596 mutex_unlock(&rnp->exp_funnel_mutex);
2cd6ffaf
PM
3597 else if (rdp)
3598 mutex_unlock(&rdp->exp_funnel_mutex);
385b73c0
PM
3599 /* Ensure test happens before caller kfree(). */
3600 smp_mb__before_atomic(); /* ^^^ */
3601 atomic_long_inc(stat);
385b73c0
PM
3602 return true;
3603 }
3604 return false;
3605}
3606
b09e5f86
PM
3607/*
3608 * Funnel-lock acquisition for expedited grace periods. Returns a
3609 * pointer to the root rcu_node structure, or NULL if some other
3610 * task did the expedited grace period for us.
3611 */
3612static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
3613{
df5bd514 3614 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
b09e5f86
PM
3615 struct rcu_node *rnp0;
3616 struct rcu_node *rnp1 = NULL;
3617
3d3b7db0 3618 /*
cdacbe1f
PM
3619 * First try directly acquiring the root lock in order to reduce
3620 * latency in the common case where expedited grace periods are
3621 * rare. We check mutex_is_locked() to avoid pathological levels of
3622 * memory contention on ->exp_funnel_mutex in the heavy-load case.
3d3b7db0 3623 */
cdacbe1f
PM
3624 rnp0 = rcu_get_root(rsp);
3625 if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
3626 if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
3627 if (sync_exp_work_done(rsp, rnp0, NULL,
df5bd514 3628 &rdp->expedited_workdone0, s))
cdacbe1f
PM
3629 return NULL;
3630 return rnp0;
3631 }
3632 }
3633
b09e5f86
PM
3634 /*
3635 * Each pass through the following loop works its way
3636 * up the rcu_node tree, returning if others have done the
3637 * work or otherwise falls through holding the root rnp's
3638 * ->exp_funnel_mutex. The mapping from CPU to rcu_node structure
3639 * can be inexact, as it is just promoting locality and is not
3640 * strictly needed for correctness.
3641 */
df5bd514 3642 if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
2cd6ffaf
PM
3643 return NULL;
3644 mutex_lock(&rdp->exp_funnel_mutex);
3645 rnp0 = rdp->mynode;
b09e5f86 3646 for (; rnp0 != NULL; rnp0 = rnp0->parent) {
2cd6ffaf 3647 if (sync_exp_work_done(rsp, rnp1, rdp,
df5bd514 3648 &rdp->expedited_workdone2, s))
b09e5f86
PM
3649 return NULL;
3650 mutex_lock(&rnp0->exp_funnel_mutex);
3651 if (rnp1)
3652 mutex_unlock(&rnp1->exp_funnel_mutex);
2cd6ffaf
PM
3653 else
3654 mutex_unlock(&rdp->exp_funnel_mutex);
b09e5f86
PM
3655 rnp1 = rnp0;
3656 }
2cd6ffaf 3657 if (sync_exp_work_done(rsp, rnp1, rdp,
df5bd514 3658 &rdp->expedited_workdone3, s))
b09e5f86
PM
3659 return NULL;
3660 return rnp1;
3661}
3662
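exp_funnel_lock() is an instance of funnel locking: each task climbs the tree holding at most one mutex at a time, and bails out as soon as it observes that another task's expedited grace period already covers its snapshot. A simplified, generic skeleton of that pattern follows; pthread mutexes and a stubbed-out "done" check stand in for the kernel's rcu_node structures and sync_exp_work_done().

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct funnel_node {
	pthread_mutex_t lock;
	struct funnel_node *parent;	/* NULL at the root. */
};

/* Stub standing in for sync_exp_work_done(): has the work covered by
 * snapshot 's' already been completed by someone else? */
static bool work_done_since(unsigned long s)
{
	(void)s;
	return false;
}

/* Climb from 'leaf' toward the root holding at most one lock at a time.
 * Returns the locked root, or NULL if the work was already done. */
static struct funnel_node *funnel_lock(struct funnel_node *leaf, unsigned long s)
{
	struct funnel_node *node;
	struct funnel_node *held = NULL;

	for (node = leaf; node != NULL; node = node->parent) {
		if (work_done_since(s)) {
			if (held)
				pthread_mutex_unlock(&held->lock);
			return NULL;		/* Someone else did our work. */
		}
		pthread_mutex_lock(&node->lock);	/* Take the parent... */
		if (held)
			pthread_mutex_unlock(&held->lock);	/* ...then drop the child. */
		held = node;
	}
	return held;		/* Caller now holds the root lock. */
}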
cf3620a6 3663/* Invoked on each online non-idle CPU for expedited quiescent state. */
338b0f76 3664static void sync_sched_exp_handler(void *data)
b09e5f86 3665{
338b0f76
PM
3666 struct rcu_data *rdp;
3667 struct rcu_node *rnp;
3668 struct rcu_state *rsp = data;
b09e5f86 3669
338b0f76
PM
3670 rdp = this_cpu_ptr(rsp->rda);
3671 rnp = rdp->mynode;
3672 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
3673 __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
3674 return;
6587a23b
PM
3675 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
3676 resched_cpu(smp_processor_id());
3d3b7db0
PM
3677}
3678
338b0f76
PM
3679/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
3680static void sync_sched_exp_online_cleanup(int cpu)
3681{
3682 struct rcu_data *rdp;
3683 int ret;
3684 struct rcu_node *rnp;
3685 struct rcu_state *rsp = &rcu_sched_state;
3686
3687 rdp = per_cpu_ptr(rsp->rda, cpu);
3688 rnp = rdp->mynode;
3689 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
3690 return;
3691 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
3692 WARN_ON_ONCE(ret);
3693}
3694
bce5fa12
PM
3695/*
3696 * Select the nodes that the upcoming expedited grace period needs
3697 * to wait for.
3698 */
dcdb8807
PM
3699static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
3700 smp_call_func_t func)
bce5fa12
PM
3701{
3702 int cpu;
3703 unsigned long flags;
3704 unsigned long mask;
3705 unsigned long mask_ofl_test;
3706 unsigned long mask_ofl_ipi;
6587a23b 3707 int ret;
bce5fa12
PM
3708 struct rcu_node *rnp;
3709
3710 sync_exp_reset_tree(rsp);
3711 rcu_for_each_leaf_node(rsp, rnp) {
2a67e741 3712 raw_spin_lock_irqsave_rcu_node(rnp, flags);
bce5fa12
PM
3713
3714 /* Each pass checks a CPU for identity, offline, and idle. */
3715 mask_ofl_test = 0;
3716 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
3717 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3718 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3719
3720 if (raw_smp_processor_id() == cpu ||
bce5fa12
PM
3721 !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3722 mask_ofl_test |= rdp->grpmask;
3723 }
3724 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
3725
3726 /*
3727 * Need to wait for any blocked tasks as well. Note that
3728 * additional blocking tasks will also block the expedited
3729 * GP until such time as the ->expmask bits are cleared.
3730 */
3731 if (rcu_preempt_has_tasks(rnp))
3732 rnp->exp_tasks = rnp->blkd_tasks.next;
3733 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3734
3735 /* IPI the remaining CPUs for expedited quiescent state. */
3736 mask = 1;
3737 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
3738 if (!(mask_ofl_ipi & mask))
3739 continue;
338b0f76 3740retry_ipi:
dcdb8807 3741 ret = smp_call_function_single(cpu, func, rsp, 0);
338b0f76 3742 if (!ret) {
6587a23b 3743 mask_ofl_ipi &= ~mask;
1307f214
PM
3744 continue;
3745 }
3746 /* Failed, raced with offline. */
3747 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3748 if (cpu_online(cpu) &&
3749 (rnp->expmask & mask)) {
338b0f76 3750 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1307f214
PM
3751 schedule_timeout_uninterruptible(1);
3752 if (cpu_online(cpu) &&
3753 (rnp->expmask & mask))
3754 goto retry_ipi;
3755 raw_spin_lock_irqsave_rcu_node(rnp, flags);
338b0f76 3756 }
1307f214
PM
3757 if (!(rnp->expmask & mask))
3758 mask_ofl_ipi &= ~mask;
3759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
bce5fa12
PM
3760 }
3761 /* Report quiescent states for those that went offline. */
3762 mask_ofl_test |= mask_ofl_ipi;
3763 if (mask_ofl_test)
3764 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
3765 }
3d3b7db0
PM
3766}
3767
cf3620a6
PM
3768static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
3769{
3770 int cpu;
3771 unsigned long jiffies_stall;
3772 unsigned long jiffies_start;
bce5fa12 3773 unsigned long mask;
72611ab9 3774 int ndetected;
bce5fa12
PM
3775 struct rcu_node *rnp;
3776 struct rcu_node *rnp_root = rcu_get_root(rsp);
cf3620a6
PM
3777 int ret;
3778
3779 jiffies_stall = rcu_jiffies_till_stall_check();
3780 jiffies_start = jiffies;
3781
3782 for (;;) {
3783 ret = wait_event_interruptible_timeout(
3784 rsp->expedited_wq,
bce5fa12 3785 sync_rcu_preempt_exp_done(rnp_root),
cf3620a6 3786 jiffies_stall);
73f36f9d 3787 if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
cf3620a6
PM
3788 return;
3789 if (ret < 0) {
3790 /* Hit a signal, disable CPU stall warnings. */
3791 wait_event(rsp->expedited_wq,
bce5fa12 3792 sync_rcu_preempt_exp_done(rnp_root));
cf3620a6
PM
3793 return;
3794 }
c5865638 3795 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
cf3620a6 3796 rsp->name);
72611ab9 3797 ndetected = 0;
bce5fa12 3798 rcu_for_each_leaf_node(rsp, rnp) {
72611ab9 3799 ndetected += rcu_print_task_exp_stall(rnp);
bce5fa12
PM
3800 mask = 1;
3801 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
74611ecb
PM
3802 struct rcu_data *rdp;
3803
bce5fa12
PM
3804 if (!(rnp->expmask & mask))
3805 continue;
72611ab9 3806 ndetected++;
74611ecb
PM
3807 rdp = per_cpu_ptr(rsp->rda, cpu);
3808 pr_cont(" %d-%c%c%c", cpu,
3809 "O."[cpu_online(cpu)],
3810 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
3811 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
bce5fa12
PM
3812 }
3813 mask <<= 1;
cf3620a6 3814 }
72611ab9
PM
3815 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
3816 jiffies - jiffies_start, rsp->expedited_sequence,
3817 rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
3818 if (!ndetected) {
3819 pr_err("blocking rcu_node structures:");
3820 rcu_for_each_node_breadth_first(rsp, rnp) {
3821 if (rnp == rnp_root)
3822 continue; /* printed unconditionally */
3823 if (sync_rcu_preempt_exp_done(rnp))
3824 continue;
3825 pr_cont(" l=%u:%d-%d:%#lx/%c",
3826 rnp->level, rnp->grplo, rnp->grphi,
3827 rnp->expmask,
3828 ".T"[!!rnp->exp_tasks]);
3829 }
3830 pr_cont("\n");
3831 }
bce5fa12
PM
3832 rcu_for_each_leaf_node(rsp, rnp) {
3833 mask = 1;
3834 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
3835 if (!(rnp->expmask & mask))
3836 continue;
3837 dump_cpu_task(cpu);
3838 }
cf3620a6
PM
3839 }
3840 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
3841 }
3842}
3843
236fefaf
PM
3844/**
3845 * synchronize_sched_expedited - Brute-force RCU-sched grace period
3846 *
3847 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
3848 * approach to force the grace period to end quickly. This consumes
3849 * significant time on all CPUs and is unfriendly to real-time workloads,
3850 * so is thus not recommended for any sort of common-case code. In fact,
3851 * if you are using synchronize_sched_expedited() in a loop, please
3852 * restructure your code to batch your updates, and then use a single
3853 * synchronize_sched() instead.
3d3b7db0 3854 *
d6ada2cf
PM
3855 * This implementation can be thought of as an application of sequence
3856 * locking to expedited grace periods, but using the sequence counter to
3857 * determine when someone else has already done the work instead of for
385b73c0 3858 * retrying readers.
3d3b7db0
PM
3859 */
3860void synchronize_sched_expedited(void)
3861{
7fd0ddc5 3862 unsigned long s;
b09e5f86 3863 struct rcu_node *rnp;
40694d66 3864 struct rcu_state *rsp = &rcu_sched_state;
3d3b7db0 3865
06f60de1
PM
3866 /* If only one CPU, this is automatically a grace period. */
3867 if (rcu_blocking_is_gp())
3868 return;
3869
5a9be7c6
PM
3870 /* If expedited grace periods are prohibited, fall back to normal. */
3871 if (rcu_gp_is_normal()) {
3872 wait_rcu_gp(call_rcu_sched);
3873 return;
3874 }
3875
d6ada2cf 3876 /* Take a snapshot of the sequence number. */
28f00767 3877 s = rcu_exp_gp_seq_snap(rsp);
3d3b7db0 3878
b09e5f86 3879 rnp = exp_funnel_lock(rsp, s);
807226e2 3880 if (rnp == NULL)
b09e5f86 3881 return; /* Someone else did our work for us. */
e0775cef 3882
28f00767 3883 rcu_exp_gp_seq_start(rsp);
338b0f76 3884 sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
bce5fa12 3885 synchronize_sched_expedited_wait(rsp);
e0775cef 3886
28f00767 3887 rcu_exp_gp_seq_end(rsp);
b09e5f86 3888 mutex_unlock(&rnp->exp_funnel_mutex);
3d3b7db0
PM
3889}
3890EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
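To illustrate the batching advice in the kernel-doc above, a hedged sketch in which 'struct widget', widget_unpublish(), and the retirement list are hypothetical: rather than paying for one expedited grace period per element, unpublish everything and then wait once.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {
	struct list_head node;
};

extern void widget_unpublish(struct widget *w);	/* Hypothetical helper. */

/* Costly: one big-hammer grace period per retired element. */
static void widgets_retire_slow(struct list_head *retired)
{
	struct widget *w, *n;

	list_for_each_entry_safe(w, n, retired, node) {
		widget_unpublish(w);
		synchronize_sched_expedited();
		list_del(&w->node);
		kfree(w);
	}
}

/* Preferred: unpublish them all, then wait for a single grace period. */
static void widgets_retire_batched(struct list_head *retired)
{
	struct widget *w, *n;

	list_for_each_entry_safe(w, n, retired, node)
		widget_unpublish(w);
	synchronize_sched();
	list_for_each_entry_safe(w, n, retired, node) {
		list_del(&w->node);
		kfree(w);
	}
}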
3891
64db4cff
PM
3892/*
3893 * Check to see if there is any immediate RCU-related work to be done
3894 * by the current CPU, for the specified type of RCU, returning 1 if so.
3895 * The checks are in order of increasing expense: checks that can be
3896 * carried out against CPU-local state are performed first. However,
3897 * we must check for CPU stalls first, else we might not get a chance.
3898 */
3899static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3900{
2f51f988
PM
3901 struct rcu_node *rnp = rdp->mynode;
3902
64db4cff
PM
3903 rdp->n_rcu_pending++;
3904
3905 /* Check for CPU stalls, if enabled. */
3906 check_cpu_stall(rsp, rdp);
3907
a096932f
PM
3908 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3909 if (rcu_nohz_full_cpu(rsp))
3910 return 0;
3911
64db4cff 3912 /* Is the RCU core waiting for a quiescent state from this CPU? */
5c51dd73 3913 if (rcu_scheduler_fully_active &&
5b74c458 3914 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
5cd37193 3915 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
97c668b8
PM
3916 rdp->n_rp_core_needs_qs++;
3917 } else if (rdp->core_needs_qs &&
5b74c458 3918 (!rdp->cpu_no_qs.b.norm ||
5cd37193 3919 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
d21670ac 3920 rdp->n_rp_report_qs++;
64db4cff 3921 return 1;
7ba5c840 3922 }
64db4cff
PM
3923
3924 /* Does this CPU have callbacks ready to invoke? */
7ba5c840
PM
3925 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3926 rdp->n_rp_cb_ready++;
64db4cff 3927 return 1;
7ba5c840 3928 }
64db4cff
PM
3929
3930 /* Has RCU gone idle with this CPU needing another grace period? */
7ba5c840
PM
3931 if (cpu_needs_another_gp(rsp, rdp)) {
3932 rdp->n_rp_cpu_needs_gp++;
64db4cff 3933 return 1;
7ba5c840 3934 }
64db4cff
PM
3935
3936 /* Has another RCU grace period completed? */
7d0ae808 3937 if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
7ba5c840 3938 rdp->n_rp_gp_completed++;
64db4cff 3939 return 1;
7ba5c840 3940 }
64db4cff
PM
3941
3942 /* Has a new RCU grace period started? */
7d0ae808
PM
3943 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3944 unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
7ba5c840 3945 rdp->n_rp_gp_started++;
64db4cff 3946 return 1;
7ba5c840 3947 }
64db4cff 3948
96d3fd0d
PM
3949 /* Does this CPU need a deferred NOCB wakeup? */
3950 if (rcu_nocb_need_deferred_wakeup(rdp)) {
3951 rdp->n_rp_nocb_defer_wakeup++;
3952 return 1;
3953 }
3954
64db4cff 3955 /* nothing to do */
7ba5c840 3956 rdp->n_rp_need_nothing++;
64db4cff
PM
3957 return 0;
3958}
3959
3960/*
3961 * Check to see if there is any immediate RCU-related work to be done
3962 * by the current CPU, returning 1 if so. This function is part of the
3963 * RCU implementation; it is -not- an exported member of the RCU API.
3964 */
e3950ecd 3965static int rcu_pending(void)
64db4cff 3966{
6ce75a23
PM
3967 struct rcu_state *rsp;
3968
3969 for_each_rcu_flavor(rsp)
e3950ecd 3970 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
6ce75a23
PM
3971 return 1;
3972 return 0;
64db4cff
PM
3973}
3974
3975/*
c0f4dfd4
PM
3976 * Return true if the specified CPU has any callback. If all_lazy is
3977 * non-NULL, store an indication of whether all callbacks are lazy.
3978 * (If there are no callbacks, all of them are deemed to be lazy.)
64db4cff 3979 */
82072c4f 3980static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
64db4cff 3981{
c0f4dfd4
PM
3982 bool al = true;
3983 bool hc = false;
3984 struct rcu_data *rdp;
6ce75a23
PM
3985 struct rcu_state *rsp;
3986
c0f4dfd4 3987 for_each_rcu_flavor(rsp) {
aa6da514 3988 rdp = this_cpu_ptr(rsp->rda);
69c8d28c
PM
3989 if (!rdp->nxtlist)
3990 continue;
3991 hc = true;
3992 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
c0f4dfd4 3993 al = false;
69c8d28c
PM
3994 break;
3995 }
c0f4dfd4
PM
3996 }
3997 if (all_lazy)
3998 *all_lazy = al;
3999 return hc;
64db4cff
PM
4000}
4001
a83eff0a
PM
4002/*
4003 * Helper function for _rcu_barrier() tracing. If tracing is disabled,
4004 * the compiler is expected to optimize this away.
4005 */
e66c33d5 4006static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
a83eff0a
PM
4007 int cpu, unsigned long done)
4008{
4009 trace_rcu_barrier(rsp->name, s, cpu,
4010 atomic_read(&rsp->barrier_cpu_count), done);
4011}
4012
b1420f1c
PM
4013/*
4014 * RCU callback function for _rcu_barrier(). If we are last, wake
4015 * up the task executing _rcu_barrier().
4016 */
24ebbca8 4017static void rcu_barrier_callback(struct rcu_head *rhp)
d0ec774c 4018{
24ebbca8
PM
4019 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
4020 struct rcu_state *rsp = rdp->rsp;
4021
a83eff0a 4022 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
4f525a52 4023 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
7db74df8 4024 complete(&rsp->barrier_completion);
a83eff0a 4025 } else {
4f525a52 4026 _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
a83eff0a 4027 }
d0ec774c
PM
4028}
4029
4030/*
4031 * Called with preemption disabled, and from cross-cpu IRQ context.
4032 */
4033static void rcu_barrier_func(void *type)
4034{
037b64ed 4035 struct rcu_state *rsp = type;
fa07a58f 4036 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
d0ec774c 4037
4f525a52 4038 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
24ebbca8 4039 atomic_inc(&rsp->barrier_cpu_count);
06668efa 4040 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
d0ec774c
PM
4041}
4042
d0ec774c
PM
4043/*
4044 * Orchestrate the specified type of RCU barrier, waiting for all
4045 * RCU callbacks of the specified type to complete.
4046 */
037b64ed 4047static void _rcu_barrier(struct rcu_state *rsp)
d0ec774c 4048{
b1420f1c 4049 int cpu;
b1420f1c 4050 struct rcu_data *rdp;
4f525a52 4051 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
b1420f1c 4052
4f525a52 4053 _rcu_barrier_trace(rsp, "Begin", -1, s);
b1420f1c 4054
e74f4c45 4055 /* Take mutex to serialize concurrent rcu_barrier() requests. */
7be7f0be 4056 mutex_lock(&rsp->barrier_mutex);
b1420f1c 4057
4f525a52
PM
4058 /* Did someone else do our work for us? */
4059 if (rcu_seq_done(&rsp->barrier_sequence, s)) {
4060 _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
cf3a9c48
PM
4061 smp_mb(); /* caller's subsequent code after above check. */
4062 mutex_unlock(&rsp->barrier_mutex);
4063 return;
4064 }
4065
4f525a52
PM
4066 /* Mark the start of the barrier operation. */
4067 rcu_seq_start(&rsp->barrier_sequence);
4068 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
b1420f1c 4069
d0ec774c 4070 /*
b1420f1c
PM
4071 * Initialize the count to one rather than to zero in order to
4072 * avoid a too-soon return to zero in case of a short grace period
1331e7a1
PM
4073 * (or preemption of this task). Exclude CPU-hotplug operations
4074 * to ensure that no offline CPU has callbacks queued.
d0ec774c 4075 */
7db74df8 4076 init_completion(&rsp->barrier_completion);
24ebbca8 4077 atomic_set(&rsp->barrier_cpu_count, 1);
1331e7a1 4078 get_online_cpus();
b1420f1c
PM
4079
4080 /*
1331e7a1
PM
4081 * Force each CPU with callbacks to register a new callback.
4082 * When that callback is invoked, we will know that all of the
4083 * corresponding CPU's preceding callbacks have been invoked.
b1420f1c 4084 */
3fbfbf7a 4085 for_each_possible_cpu(cpu) {
d1e43fa5 4086 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3fbfbf7a 4087 continue;
b1420f1c 4088 rdp = per_cpu_ptr(rsp->rda, cpu);
d1e43fa5 4089 if (rcu_is_nocb_cpu(cpu)) {
d7e29933
PM
4090 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
4091 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
4f525a52 4092 rsp->barrier_sequence);
d7e29933
PM
4093 } else {
4094 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
4f525a52 4095 rsp->barrier_sequence);
41050a00 4096 smp_mb__before_atomic();
d7e29933
PM
4097 atomic_inc(&rsp->barrier_cpu_count);
4098 __call_rcu(&rdp->barrier_head,
4099 rcu_barrier_callback, rsp, cpu, 0);
4100 }
7d0ae808 4101 } else if (READ_ONCE(rdp->qlen)) {
a83eff0a 4102 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
4f525a52 4103 rsp->barrier_sequence);
037b64ed 4104 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
b1420f1c 4105 } else {
a83eff0a 4106 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
4f525a52 4107 rsp->barrier_sequence);
b1420f1c
PM
4108 }
4109 }
1331e7a1 4110 put_online_cpus();
b1420f1c
PM
4111
4112 /*
4113 * Now that we have an rcu_barrier_callback() callback on each
4114 * CPU, and thus each counted, remove the initial count.
4115 */
24ebbca8 4116 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
7db74df8 4117 complete(&rsp->barrier_completion);
b1420f1c
PM
4118
4119 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
7db74df8 4120 wait_for_completion(&rsp->barrier_completion);
b1420f1c 4121
4f525a52
PM
4122 /* Mark the end of the barrier operation. */
4123 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
4124 rcu_seq_end(&rsp->barrier_sequence);
4125
b1420f1c 4126 /* Other rcu_barrier() invocations can now safely proceed. */
7be7f0be 4127 mutex_unlock(&rsp->barrier_mutex);
d0ec774c 4128}
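_rcu_barrier() initializes barrier_cpu_count to one before posting the per-CPU callbacks and drops that initial reference only after all of them have been registered, so a quickly completing callback cannot drive the count to zero prematurely. A standalone model of this "bias the count by one" completion idiom, in plain C11 atomics with a spin-wait standing in for wait_for_completion():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pending;
static atomic_bool all_done;

static void piece_complete(void)
{
	/* The last decrement signals completion. */
	if (atomic_fetch_sub(&pending, 1) == 1)
		atomic_store(&all_done, true);
}

static void run_pieces(int npieces)
{
	int i;

	atomic_store(&all_done, false);
	atomic_store(&pending, 1);	/* Initial reference held by this function. */
	for (i = 0; i < npieces; i++) {
		atomic_fetch_add(&pending, 1);
		/* A real user would start asynchronous work here that later
		 * calls piece_complete(); even if it finishes immediately,
		 * the initial reference keeps the count above zero. */
		piece_complete();	/* Stand-in for the asynchronous completion. */
	}
	piece_complete();		/* Drop the initial reference. */
	while (!atomic_load(&all_done))
		;			/* Spin-wait stands in for wait_for_completion(). */
}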
d0ec774c
PM
4129
4130/**
4131 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
4132 */
4133void rcu_barrier_bh(void)
4134{
037b64ed 4135 _rcu_barrier(&rcu_bh_state);
d0ec774c
PM
4136}
4137EXPORT_SYMBOL_GPL(rcu_barrier_bh);
4138
4139/**
4140 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
4141 */
4142void rcu_barrier_sched(void)
4143{
037b64ed 4144 _rcu_barrier(&rcu_sched_state);
d0ec774c
PM
4145}
4146EXPORT_SYMBOL_GPL(rcu_barrier_sched);
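The canonical caller of these barrier functions is module unload: callbacks the module has already posted must be invoked before its code and data disappear. A sketch with hypothetical helper names:

#include <linux/module.h>
#include <linux/rcupdate.h>

/* Hypothetical module cleanup ordering. */
static void __exit example_exit(void)
{
	example_stop_posting_callbacks();	/* Hypothetical: no new call_rcu_sched() after this. */
	rcu_barrier_sched();			/* Wait for callbacks already posted to run... */
	example_free_caches();			/* ...before tearing down the memory they touch. */
}
module_exit(example_exit);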
4147
0aa04b05
PM
4148/*
 4149 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4150 * first CPU in a given leaf rcu_node structure coming online. The caller
 4151 * must hold the corresponding leaf rcu_node ->lock with interrupts
4152 * disabled.
4153 */
4154static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4155{
4156 long mask;
4157 struct rcu_node *rnp = rnp_leaf;
4158
4159 for (;;) {
4160 mask = rnp->grpmask;
4161 rnp = rnp->parent;
4162 if (rnp == NULL)
4163 return;
6cf10081 4164 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
0aa04b05
PM
4165 rnp->qsmaskinit |= mask;
4166 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
4167 }
4168}
4169
64db4cff 4170/*
27569620 4171 * Do boot-time initialization of a CPU's per-CPU RCU data.
64db4cff 4172 */
27569620
PM
4173static void __init
4174rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cff
PM
4175{
4176 unsigned long flags;
394f99a9 4177 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
27569620
PM
4178 struct rcu_node *rnp = rcu_get_root(rsp);
4179
4180 /* Set up local state, ensuring consistent view of global state. */
6cf10081 4181 raw_spin_lock_irqsave_rcu_node(rnp, flags);
27569620 4182 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
27569620 4183 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
29e37d81 4184 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
9b2e4f18 4185 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
27569620 4186 rdp->cpu = cpu;
d4c08f2a 4187 rdp->rsp = rsp;
2cd6ffaf 4188 mutex_init(&rdp->exp_funnel_mutex);
3fbfbf7a 4189 rcu_boot_init_nocb_percpu_data(rdp);
1304afb2 4190 raw_spin_unlock_irqrestore(&rnp->lock, flags);
27569620
PM
4191}
4192
4193/*
4194 * Initialize a CPU's per-CPU RCU data. Note that only one online or
4195 * offline event can be happening at a given time. Note also that we
4196 * can accept some slop in the rsp->completed access due to the fact
4197 * that this CPU cannot possibly have any RCU callbacks in flight yet.
64db4cff 4198 */
49fb4c62 4199static void
9b67122a 4200rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cff
PM
4201{
4202 unsigned long flags;
64db4cff 4203 unsigned long mask;
394f99a9 4204 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
64db4cff
PM
4205 struct rcu_node *rnp = rcu_get_root(rsp);
4206
4207 /* Set up local state, ensuring consistent view of global state. */
6cf10081 4208 raw_spin_lock_irqsave_rcu_node(rnp, flags);
37c72e56
PM
4209 rdp->qlen_last_fqs_check = 0;
4210 rdp->n_force_qs_snap = rsp->n_force_qs;
64db4cff 4211 rdp->blimit = blimit;
39c8d313
PM
4212 if (!rdp->nxtlist)
4213 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
29e37d81 4214 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2333210b 4215 rcu_sysidle_init_percpu_data(rdp->dynticks);
c92b131b
PM
4216 atomic_set(&rdp->dynticks->dynticks,
4217 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
1304afb2 4218 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
64db4cff 4219
0aa04b05
PM
4220 /*
4221 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4222 * propagation up the rcu_node tree will happen at the beginning
4223 * of the next grace period.
4224 */
64db4cff
PM
4225 rnp = rdp->mynode;
4226 mask = rdp->grpmask;
2a67e741 4227 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
0aa04b05 4228 rnp->qsmaskinitnext |= mask;
b9585e94
PM
4229 rnp->expmaskinitnext |= mask;
4230 if (!rdp->beenonline)
4231 WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
4232 rdp->beenonline = true; /* We have now been online. */
0aa04b05
PM
4233 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
4234 rdp->completed = rnp->completed;
5b74c458 4235 rdp->cpu_no_qs.b.norm = true;
a738eec6 4236 rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
97c668b8 4237 rdp->core_needs_qs = false;
0aa04b05
PM
4238 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
4239 raw_spin_unlock_irqrestore(&rnp->lock, flags);
64db4cff
PM
4240}
4241
49fb4c62 4242static void rcu_prepare_cpu(int cpu)
64db4cff 4243{
6ce75a23
PM
4244 struct rcu_state *rsp;
4245
4246 for_each_rcu_flavor(rsp)
9b67122a 4247 rcu_init_percpu_data(cpu, rsp);
64db4cff
PM
4248}
4249
4250/*
f41d911f 4251 * Handle CPU online/offline notification events.
64db4cff 4252 */
88428cc5
PM
4253int rcu_cpu_notify(struct notifier_block *self,
4254 unsigned long action, void *hcpu)
64db4cff
PM
4255{
4256 long cpu = (long)hcpu;
e534165b 4257 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
a26ac245 4258 struct rcu_node *rnp = rdp->mynode;
6ce75a23 4259 struct rcu_state *rsp;
64db4cff
PM
4260
4261 switch (action) {
4262 case CPU_UP_PREPARE:
4263 case CPU_UP_PREPARE_FROZEN:
d72bce0e
PZ
4264 rcu_prepare_cpu(cpu);
4265 rcu_prepare_kthreads(cpu);
35ce7f29 4266 rcu_spawn_all_nocb_kthreads(cpu);
a26ac245
PM
4267 break;
4268 case CPU_ONLINE:
0f962a5e 4269 case CPU_DOWN_FAILED:
338b0f76 4270 sync_sched_exp_online_cleanup(cpu);
5d01bbd1 4271 rcu_boost_kthread_setaffinity(rnp, -1);
0f962a5e
PM
4272 break;
4273 case CPU_DOWN_PREPARE:
34ed6246 4274 rcu_boost_kthread_setaffinity(rnp, cpu);
64db4cff 4275 break;
d0ec774c
PM
4276 case CPU_DYING:
4277 case CPU_DYING_FROZEN:
6ce75a23
PM
4278 for_each_rcu_flavor(rsp)
4279 rcu_cleanup_dying_cpu(rsp);
d0ec774c 4280 break;
88428cc5 4281 case CPU_DYING_IDLE:
6587a23b 4282 /* QS for any half-done expedited RCU-sched GP. */
338b0f76
PM
4283 preempt_disable();
4284 rcu_report_exp_rdp(&rcu_sched_state,
4285 this_cpu_ptr(rcu_sched_state.rda), true);
4286 preempt_enable();
6587a23b 4287
88428cc5
PM
4288 for_each_rcu_flavor(rsp) {
4289 rcu_cleanup_dying_idle_cpu(cpu, rsp);
4290 }
4291 break;
64db4cff
PM
4292 case CPU_DEAD:
4293 case CPU_DEAD_FROZEN:
4294 case CPU_UP_CANCELED:
4295 case CPU_UP_CANCELED_FROZEN:
776d6807 4296 for_each_rcu_flavor(rsp) {
6ce75a23 4297 rcu_cleanup_dead_cpu(cpu, rsp);
776d6807
PM
4298 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
4299 }
64db4cff
PM
4300 break;
4301 default:
4302 break;
4303 }
34ed6246 4304 return NOTIFY_OK;
64db4cff
PM
4305}
4306
d1d74d14
BP
4307static int rcu_pm_notify(struct notifier_block *self,
4308 unsigned long action, void *hcpu)
4309{
4310 switch (action) {
4311 case PM_HIBERNATION_PREPARE:
4312 case PM_SUSPEND_PREPARE:
4313 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
5afff48b 4314 rcu_expedite_gp();
d1d74d14
BP
4315 break;
4316 case PM_POST_HIBERNATION:
4317 case PM_POST_SUSPEND:
5afff48b
PM
4318 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
4319 rcu_unexpedite_gp();
d1d74d14
BP
4320 break;
4321 default:
4322 break;
4323 }
4324 return NOTIFY_OK;
4325}
4326
b3dbec76 4327/*
9386c0b7 4328 * Spawn the kthreads that handle each RCU flavor's grace periods.
b3dbec76
PM
4329 */
4330static int __init rcu_spawn_gp_kthread(void)
4331{
4332 unsigned long flags;
a94844b2 4333 int kthread_prio_in = kthread_prio;
b3dbec76
PM
4334 struct rcu_node *rnp;
4335 struct rcu_state *rsp;
a94844b2 4336 struct sched_param sp;
b3dbec76
PM
4337 struct task_struct *t;
4338
a94844b2
PM
4339 /* Force priority into range. */
4340 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4341 kthread_prio = 1;
4342 else if (kthread_prio < 0)
4343 kthread_prio = 0;
4344 else if (kthread_prio > 99)
4345 kthread_prio = 99;
4346 if (kthread_prio != kthread_prio_in)
4347 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4348 kthread_prio, kthread_prio_in);
4349
9386c0b7 4350 rcu_scheduler_fully_active = 1;
b3dbec76 4351 for_each_rcu_flavor(rsp) {
a94844b2 4352 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
b3dbec76
PM
4353 BUG_ON(IS_ERR(t));
4354 rnp = rcu_get_root(rsp);
6cf10081 4355 raw_spin_lock_irqsave_rcu_node(rnp, flags);
b3dbec76 4356 rsp->gp_kthread = t;
a94844b2
PM
4357 if (kthread_prio) {
4358 sp.sched_priority = kthread_prio;
4359 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4360 }
b3dbec76 4361 raw_spin_unlock_irqrestore(&rnp->lock, flags);
e11f1335 4362 wake_up_process(t);
b3dbec76 4363 }
35ce7f29 4364 rcu_spawn_nocb_kthreads();
9386c0b7 4365 rcu_spawn_boost_kthreads();
b3dbec76
PM
4366 return 0;
4367}
4368early_initcall(rcu_spawn_gp_kthread);
4369
bbad9379
PM
4370/*
4371 * This function is invoked towards the end of the scheduler's initialization
4372 * process. Before this is called, the idle task might contain
4373 * RCU read-side critical sections (during which time, this idle
4374 * task is booting the system). After this function is called, the
4375 * idle tasks are prohibited from containing RCU read-side critical
4376 * sections. This function also enables RCU lockdep checking.
4377 */
4378void rcu_scheduler_starting(void)
4379{
4380 WARN_ON(num_online_cpus() != 1);
4381 WARN_ON(nr_context_switches() > 0);
4382 rcu_scheduler_active = 1;
4383}
4384
64db4cff
PM
4385/*
4386 * Compute the per-level fanout, either using the exact fanout specified
7fa27001 4387 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
64db4cff 4388 */
199977bf 4389static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
64db4cff 4390{
64db4cff
PM
4391 int i;
4392
7fa27001 4393 if (rcu_fanout_exact) {
199977bf 4394 levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
66292405 4395 for (i = rcu_num_lvls - 2; i >= 0; i--)
199977bf 4396 levelspread[i] = RCU_FANOUT;
66292405
PM
4397 } else {
4398 int ccur;
4399 int cprv;
4400
4401 cprv = nr_cpu_ids;
4402 for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bf
AG
4403 ccur = levelcnt[i];
4404 levelspread[i] = (cprv + ccur - 1) / ccur;
66292405
PM
4405 cprv = ccur;
4406 }
64db4cff
PM
4407 }
4408}
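A worked example of the balanced (non-exact) branch, assuming a two-level tree with nr_cpu_ids = 96 and levelcnt = {1, 6} (one root rcu_node, six leaves); the numbers are illustrative only:

	cprv = nr_cpu_ids = 96
	i = 1: ccur = 6  ->  levelspread[1] = (96 + 6 - 1) / 6 = 16   (CPUs per leaf)
	       cprv = 6
	i = 0: ccur = 1  ->  levelspread[0] = (6 + 1 - 1) / 1  =  6   (leaves under the root)

The (cprv + ccur - 1) / ccur expression is simply a ceiling division, so the children are spread as evenly as possible across each level.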
64db4cff
PM
4409
4410/*
4411 * Helper function for rcu_init() that initializes one rcu_state structure.
4412 */
a87f203e 4413static void __init rcu_init_one(struct rcu_state *rsp)
64db4cff 4414{
cb007102
AG
4415 static const char * const buf[] = RCU_NODE_NAME_INIT;
4416 static const char * const fqs[] = RCU_FQS_NAME_INIT;
385b73c0 4417 static const char * const exp[] = RCU_EXP_NAME_INIT;
3dc5dbe9
PM
4418 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4419 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4420 static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
4a81e832 4421 static u8 fl_mask = 0x1;
199977bf
AG
4422
4423 int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */
4424 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
64db4cff
PM
4425 int cpustride = 1;
4426 int i;
4427 int j;
4428 struct rcu_node *rnp;
4429
05b84aec 4430 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
b6407e86 4431
3eaaaf6c
PM
4432 /* Silence gcc 4.8 false positive about array index out of range. */
4433 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4434 panic("rcu_init_one: rcu_num_lvls out of range");
4930521a 4435
64db4cff
PM
4436 /* Initialize the level-tracking arrays. */
4437
f885b7f2 4438 for (i = 0; i < rcu_num_lvls; i++)
199977bf 4439 levelcnt[i] = num_rcu_lvl[i];
f885b7f2 4440 for (i = 1; i < rcu_num_lvls; i++)
199977bf
AG
4441 rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
4442 rcu_init_levelspread(levelspread, levelcnt);
4a81e832
PM
4443 rsp->flavor_mask = fl_mask;
4444 fl_mask <<= 1;
64db4cff
PM
4445
4446 /* Initialize the elements themselves, starting from the leaves. */
4447
f885b7f2 4448 for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bf 4449 cpustride *= levelspread[i];
64db4cff 4450 rnp = rsp->level[i];
199977bf 4451 for (j = 0; j < levelcnt[i]; j++, rnp++) {
1304afb2 4452 raw_spin_lock_init(&rnp->lock);
b6407e86
PM
4453 lockdep_set_class_and_name(&rnp->lock,
4454 &rcu_node_class[i], buf[i]);
394f2769
PM
4455 raw_spin_lock_init(&rnp->fqslock);
4456 lockdep_set_class_and_name(&rnp->fqslock,
4457 &rcu_fqs_class[i], fqs[i]);
25d30cf4
PM
4458 rnp->gpnum = rsp->gpnum;
4459 rnp->completed = rsp->completed;
64db4cff
PM
4460 rnp->qsmask = 0;
4461 rnp->qsmaskinit = 0;
4462 rnp->grplo = j * cpustride;
4463 rnp->grphi = (j + 1) * cpustride - 1;
595f3900
HS
4464 if (rnp->grphi >= nr_cpu_ids)
4465 rnp->grphi = nr_cpu_ids - 1;
64db4cff
PM
4466 if (i == 0) {
4467 rnp->grpnum = 0;
4468 rnp->grpmask = 0;
4469 rnp->parent = NULL;
4470 } else {
199977bf 4471 rnp->grpnum = j % levelspread[i - 1];
64db4cff
PM
4472 rnp->grpmask = 1UL << rnp->grpnum;
4473 rnp->parent = rsp->level[i - 1] +
199977bf 4474 j / levelspread[i - 1];
64db4cff
PM
4475 }
4476 rnp->level = i;
12f5f524 4477 INIT_LIST_HEAD(&rnp->blkd_tasks);
dae6e64d 4478 rcu_init_one_nocb(rnp);
385b73c0 4479 mutex_init(&rnp->exp_funnel_mutex);
83c2c735
PM
4480 lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
4481 &rcu_exp_class[i], exp[i]);
64db4cff
PM
4482 }
4483 }
0c34029a 4484
b3dbec76 4485 init_waitqueue_head(&rsp->gp_wq);
f4ecea30 4486 init_waitqueue_head(&rsp->expedited_wq);
f885b7f2 4487 rnp = rsp->level[rcu_num_lvls - 1];
0c34029a 4488 for_each_possible_cpu(i) {
4a90a068 4489 while (i > rnp->grphi)
0c34029a 4490 rnp++;
394f99a9 4491 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
0c34029a
LJ
4492 rcu_boot_init_percpu_data(i, rsp);
4493 }
6ce75a23 4494 list_add(&rsp->flavors, &rcu_struct_flavors);
64db4cff
PM
4495}
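/*
 * Small user-space sketch (an illustration, not the kernel data
 * structures) of how the leaf pass above carves CPUs into rcu_node
 * ranges: leaf j covers CPUs j * cpustride through (j + 1) * cpustride - 1,
 * clamped to nr_cpu_ids - 1, so the per-CPU loop at the end of
 * rcu_init_one() assigns each CPU i a ->mynode whose range contains i.
 */
#include <stdio.h>

int main(void)
{
	const int nr_cpu_ids = 96;
	const int cpustride = 16;	/* leaf-level levelspread in this example */
	const int nr_leaves = (nr_cpu_ids + cpustride - 1) / cpustride;
	int j;

	for (j = 0; j < nr_leaves; j++) {
		int grplo = j * cpustride;
		int grphi = (j + 1) * cpustride - 1;

		if (grphi >= nr_cpu_ids)
			grphi = nr_cpu_ids - 1;	/* clamp the last leaf */
		printf("leaf %d: CPUs %d-%d\n", j, grplo, grphi);
	}
	return 0;
}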
4496
f885b7f2
PM
4497/*
4498 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4102adab 4499 * replace the definitions in tree.h because those are needed to size
f885b7f2
PM
4500 * the ->node array in the rcu_state structure.
4501 */
4502static void __init rcu_init_geometry(void)
4503{
026ad283 4504 ulong d;
f885b7f2 4505 int i;
05b84aec 4506 int rcu_capacity[RCU_NUM_LVLS];
f885b7f2 4507
026ad283
PM
4508 /*
4509 * Initialize any unspecified boot parameters.
4510 * The default values of jiffies_till_first_fqs and
4511 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4512 * value, which is a function of HZ, plus one for each
4513 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4514 */
4515 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4516 if (jiffies_till_first_fqs == ULONG_MAX)
4517 jiffies_till_first_fqs = d;
4518 if (jiffies_till_next_fqs == ULONG_MAX)
4519 jiffies_till_next_fqs = d;
4520
f885b7f2 4521 /* If the compile-time values are accurate, just leave. */
47d631af 4522 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
b17c7035 4523 nr_cpu_ids == NR_CPUS)
f885b7f2 4524 return;
39479098
PM
4525 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
4526 rcu_fanout_leaf, nr_cpu_ids);
f885b7f2 4527
f885b7f2 4528 /*
ee968ac6
PM
4529 * The boot-time rcu_fanout_leaf parameter must be at least two
4530 * and cannot exceed the number of bits in the rcu_node masks.
4531 * Complain and fall back to the compile-time values if this
4532 * limit is exceeded.
f885b7f2 4533 */
ee968ac6 4534 if (rcu_fanout_leaf < 2 ||
75cf15a4 4535 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
13bd6494 4536 rcu_fanout_leaf = RCU_FANOUT_LEAF;
f885b7f2
PM
4537 WARN_ON(1);
4538 return;
4539 }
4540
f885b7f2
PM
4541 /*
4542 * Compute the number of nodes that can be handled by an rcu_node tree
9618138b 4543 * with the given number of levels.
f885b7f2 4544 */
9618138b 4545 rcu_capacity[0] = rcu_fanout_leaf;
05b84aec 4546 for (i = 1; i < RCU_NUM_LVLS; i++)
05c5df31 4547 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
f885b7f2
PM
4548
4549 /*
75cf15a4 4550 * The tree must be able to accommodate the configured number of CPUs.
ee968ac6 4551 * If this limit is exceeded, fall back to the compile-time values.
f885b7f2 4552 */
ee968ac6
PM
4553 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4554 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4555 WARN_ON(1);
4556 return;
4557 }
f885b7f2 4558
679f9858 4559 /* Calculate the number of levels in the tree. */
9618138b 4560 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
679f9858 4561 }
9618138b 4562 rcu_num_lvls = i + 1;
679f9858 4563
f885b7f2 4564 /* Calculate the number of rcu_nodes at each level of the tree. */
679f9858 4565 for (i = 0; i < rcu_num_lvls; i++) {
9618138b 4566 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
679f9858
AG
4567 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4568 }
f885b7f2
PM
4569
4570 /* Calculate the total number of rcu_node structures. */
4571 rcu_num_nodes = 0;
679f9858 4572 for (i = 0; i < rcu_num_lvls; i++)
f885b7f2 4573 rcu_num_nodes += num_rcu_lvl[i];
f885b7f2
PM
4574}
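/*
 * Stand-alone model of the sizing above (user-space; a fanout of 64 and
 * four possible levels are assumptions matching common 64-bit defaults).
 * For nr_cpu_ids = 1000 and rcu_fanout_leaf = 16 it reports a two-level
 * tree: one root plus 63 leaves, 64 rcu_node structures in total.
 */
#include <stdio.h>

#define MODEL_FANOUT	64	/* assumed interior-node fanout */
#define MODEL_NUM_LVLS	4	/* assumed maximum tree depth */

int main(void)
{
	const int nr_cpu_ids = 1000;
	const int fanout_leaf = 16;
	int capacity[MODEL_NUM_LVLS];
	int num_lvl[MODEL_NUM_LVLS];
	int levels;
	int nodes = 0;
	int i;

	/* CPUs coverable by a tree of each possible depth. */
	capacity[0] = fanout_leaf;
	for (i = 1; i < MODEL_NUM_LVLS; i++)
		capacity[i] = capacity[i - 1] * MODEL_FANOUT;

	/* Smallest depth whose capacity covers nr_cpu_ids. */
	for (i = 0; nr_cpu_ids > capacity[i]; i++)
		;
	levels = i + 1;

	/* Nodes needed at each level, root first (DIV_ROUND_UP). */
	for (i = 0; i < levels; i++) {
		int cap = capacity[(levels - 1) - i];

		num_lvl[i] = (nr_cpu_ids + cap - 1) / cap;
		nodes += num_lvl[i];
	}

	printf("levels = %d, nodes per level (root first):", levels);
	for (i = 0; i < levels; i++)
		printf(" %d", num_lvl[i]);
	printf(", total = %d\n", nodes);
	return 0;
}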
4575
a3dc2948
PM
4576/*
4577 * Dump out the structure of the rcu_node combining tree associated
4578 * with the rcu_state structure referenced by rsp.
4579 */
4580static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4581{
4582 int level = 0;
4583 struct rcu_node *rnp;
4584
4585 pr_info("rcu_node tree layout dump\n");
4586 pr_info(" ");
4587 rcu_for_each_node_breadth_first(rsp, rnp) {
4588 if (rnp->level != level) {
4589 pr_cont("\n");
4590 pr_info(" ");
4591 level = rnp->level;
4592 }
4593 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4594 }
4595 pr_cont("\n");
4596}
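/*
 * For the two-level, 96-CPU example used above, the layout dump would
 * look roughly as follows (an illustration derived from the
 * grplo/grphi/grpnum assignments in rcu_init_one(), not captured output):
 *
 *   rcu_node tree layout dump
 *    0:95 ^0
 *    0:15 ^0 16:31 ^1 32:47 ^2 48:63 ^3 64:79 ^4 80:95 ^5
 */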
4597
9f680ab4 4598void __init rcu_init(void)
64db4cff 4599{
017c4261 4600 int cpu;
9f680ab4 4601
47627678
PM
4602 rcu_early_boot_tests();
4603
f41d911f 4604 rcu_bootup_announce();
f885b7f2 4605 rcu_init_geometry();
a87f203e
PM
4606 rcu_init_one(&rcu_bh_state);
4607 rcu_init_one(&rcu_sched_state);
a3dc2948
PM
4608 if (dump_tree)
4609 rcu_dump_rcu_node_tree(&rcu_sched_state);
f41d911f 4610 __rcu_init_preempt();
b5b39360 4611 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
9f680ab4
PM
4612
4613 /*
4614 * We don't need protection against CPU-hotplug here because
4615 * this is called early in boot, before either interrupts
4616 * or the scheduler are operational.
4617 */
4618 cpu_notifier(rcu_cpu_notify, 0);
d1d74d14 4619 pm_notifier(rcu_pm_notify, 0);
017c4261
PM
4620 for_each_online_cpu(cpu)
4621 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
64db4cff
PM
4622}
4623
4102adab 4624#include "tree_plugin.h"