1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
5 * Copyright IBM Corporation, 2008
7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 * Manfred Spraul <manfred@colorfullife.com>
9 * Paul E. McKenney <paulmck@linux.ibm.com>
11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
14 * For detailed explanation of Read-Copy Update mechanism see -
15 * Documentation/RCU
16 */
18 #define pr_fmt(fmt) "rcu: " fmt
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
65 #include <linux/kasan.h>
66 #include <linux/context_tracking.h>
67 #include "../time/tick-internal.h"
72 #ifdef MODULE_PARAM_PREFIX
73 #undef MODULE_PARAM_PREFIX
74 #endif
75 #define MODULE_PARAM_PREFIX "rcutree."
77 /* Data structures. */
79 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
81 #ifdef CONFIG_RCU_NOCB_CPU
82 .cblist.flags = SEGCBLIST_RCU_CORE,
83 #endif
84 };
85 static struct rcu_state rcu_state = {
86 .level = { &rcu_state.node[0] },
87 .gp_state = RCU_GP_IDLE,
88 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
89 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
90 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
93 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
94 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
95 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
96 };
98 /* Dump rcu_node combining tree at boot to verify correct setup. */
99 static bool dump_tree;
100 module_param(dump_tree, bool, 0444);
101 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
102 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
103 #ifndef CONFIG_PREEMPT_RT
104 module_param(use_softirq, bool, 0444);
105 #endif
106 /* Control rcu_node-tree auto-balancing at boot time. */
107 static bool rcu_fanout_exact;
108 module_param(rcu_fanout_exact, bool, 0444);
109 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
110 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
111 module_param(rcu_fanout_leaf, int, 0444);
112 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
113 /* Number of rcu_nodes at specified level. */
114 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
115 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
118 * The rcu_scheduler_active variable is initialized to the value
119 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
120 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
121 * RCU can assume that there is but one task, allowing RCU to (for example)
122 * optimize synchronize_rcu() to a simple barrier(). When this variable
123 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
124 * to detect real grace periods. This variable is also used to suppress
125 * boot-time false positives from lockdep-RCU error checking. Finally, it
126 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
127 * is fully initialized, including all of its kthreads having been spawned.
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
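/*
 * Illustrative sketch (hypothetical helper, not used anywhere in RCU):
 * the early-boot shortcut that the RCU_SCHEDULER_INACTIVE state permits.
 * With only one task in existence, nothing can be within an RCU read-side
 * critical section, so a grace period is trivially complete and
 * synchronize_rcu() can degenerate to a simple compiler barrier.
 * synchronize_rcu() itself performs an equivalent check internally; this
 * sketch merely restates the comment above as code.
 */
static void __maybe_unused example_early_boot_wait_sketch(void)
{
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		barrier();		/* Single task: nothing to wait for. */
	else
		synchronize_rcu();	/* Must wait for a real grace period. */
}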
133 * The rcu_scheduler_fully_active variable transitions from zero to one
134 * during the early_initcall() processing, which is after the scheduler
135 * is capable of creating new tasks. So RCU processing (for example,
136 * creating tasks for RCU priority boosting) must be delayed until after
137 * rcu_scheduler_fully_active transitions from zero to one. We also
138 * currently delay invocation of any RCU callbacks until after this point.
140 * It might later prove better for people registering RCU callbacks during
141 * early boot to take responsibility for these callbacks, but one step at
142 * a time.
144 static int rcu_scheduler_fully_active __read_mostly;
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
147 unsigned long gps, unsigned long flags);
148 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
149 static void invoke_rcu_core(void);
150 static void rcu_report_exp_rdp(struct rcu_data *rdp);
151 static void sync_sched_exp_online_cleanup(int cpu);
152 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
153 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
154 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
155 static bool rcu_init_invoked(void);
156 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
157 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
160 * rcuc/rcub/rcuop kthread realtime priority. Whether the "rcuop" kthreads
161 * actually use this real-time priority (i.e., enabling/disabling it) is
162 * controlled by the additional CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
164 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
165 module_param(kthread_prio, int, 0444);
167 /* Delays in jiffies for grace-period initialization and cleanup, debug only. */
169 static int gp_preinit_delay;
170 module_param(gp_preinit_delay, int, 0444);
171 static int gp_init_delay;
172 module_param(gp_init_delay, int, 0444);
173 static int gp_cleanup_delay;
174 module_param(gp_cleanup_delay, int, 0444);
176 // Add delay to rcu_read_unlock() for strict grace periods.
177 static int rcu_unlock_delay;
178 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
179 module_param(rcu_unlock_delay, int, 0444);
183 * This rcu parameter is runtime-read-only. It reflects
184 * the minimum allowed number of objects which can be cached
185 * per CPU. Object size is equal to one page. This value
186 * can be changed only at boot time.
188 static int rcu_min_cached_objs = 5;
189 module_param(rcu_min_cached_objs, int, 0444);
191 // A page shrinker can ask for pages to be freed to make them
192 // available for other parts of the system. This usually happens
193 // under low memory conditions, and in that case we should also
194 // defer page-cache filling for a short time period.
196 // The default value is 5 seconds, which is long enough to reduce
197 // interference with the shrinker while it asks other subsystems to
198 // drain their caches.
199 static int rcu_delay_page_cache_fill_msec = 5000;
200 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
202 /* Retrieve RCU kthreads priority for rcutorture */
203 int rcu_get_gp_kthreads_prio(void)
207 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
210 * Number of grace periods between delays, normalized by the duration of
211 * the delay. The longer the delay, the more the grace periods between
212 * each delay. The reason for this normalization is that it means that,
213 * for non-zero delays, the overall slowdown of grace periods is constant
214 * regardless of the duration of the delay. This arrangement balances
215 * the need for long delays to increase some race probabilities with the
216 * need for fast grace periods to increase other race probabilities.
218 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
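/*
 * Worked example of the normalization above (values assumed purely for
 * illustration): with rcu_num_nodes = 5 and a debug delay of 4 jiffies,
 * rcu_gp_slow() below sleeps once every 5 * 3 * 4 = 60 grace periods, so
 * the average added latency is 4/60 = 1/15 jiffy per grace period.
 * Doubling the delay to 8 jiffies halves its frequency (once per 120
 * grace periods), leaving the average slowdown unchanged at 1/15 jiffy
 * per grace period.
 */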
221 * Return true if an RCU grace period is in progress. The READ_ONCE()s
222 * permit this function to be invoked without holding the root rcu_node
223 * structure's ->lock, but of course results can be subject to change.
225 static int rcu_gp_in_progress(void)
227 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
231 * Return the number of callbacks queued on the specified CPU.
232 * Handles both the nocbs and normal cases.
234 static long rcu_get_n_cbs_cpu(int cpu)
236 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
238 if (rcu_segcblist_is_enabled(&rdp->cblist))
239 return rcu_segcblist_n_cbs(&rdp->cblist);
240 return 0;
241 }
243 void rcu_softirq_qs(void)
246 rcu_preempt_deferred_qs(current);
247 rcu_tasks_qs(current, false);
251 * Reset the current CPU's ->dynticks counter to indicate that the
252 * newly onlined CPU is no longer in an extended quiescent state.
253 * This will either leave the counter unchanged, or increment it
254 * to the next non-quiescent value.
256 * The non-atomic test/increment sequence works because the upper bits
257 * of the ->dynticks counter are manipulated only by the corresponding CPU,
258 * or when the corresponding CPU is offline.
260 static void rcu_dynticks_eqs_online(void)
262 if (ct_dynticks() & RCU_DYNTICKS_IDX)
263 return;
264 ct_state_inc(RCU_DYNTICKS_IDX);
268 * Snapshot the ->dynticks counter with full ordering so as to allow
269 * stable comparison of this counter with past and future snapshots.
271 static int rcu_dynticks_snap(int cpu)
273 smp_mb(); // Fundamental RCU ordering guarantee.
274 return ct_dynticks_cpu_acquire(cpu);
278 * Return true if the snapshot returned from rcu_dynticks_snap()
279 * indicates that RCU is in an extended quiescent state.
281 static bool rcu_dynticks_in_eqs(int snap)
283 return !(snap & RCU_DYNTICKS_IDX);
287 * Return true if the CPU corresponding to the specified rcu_data
288 * structure has spent some time in an extended quiescent state since
289 * rcu_dynticks_snap() returned the specified snapshot.
291 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
293 return snap != rcu_dynticks_snap(rdp->cpu);
297 * Return true if the referenced integer is zero while the specified
298 * CPU remains within a single extended quiescent state.
300 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
304 // If not quiescent, force back to earlier extended quiescent state.
305 snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
306 smp_rmb(); // Order ->dynticks and *vp reads.
307 if (READ_ONCE(*vp))
308 return false; // Non-zero, so report failure.
309 smp_rmb(); // Order *vp read and ->dynticks re-read.
311 // If still in the same extended quiescent state, we are good!
312 return snap == ct_dynticks_cpu(cpu);
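/*
 * Illustrative sketch (hypothetical helper, not invoked anywhere): the
 * snapshot-then-recheck pattern provided by the helpers above.  The
 * force-quiescent-state code later in this file applies the same pattern
 * via dyntick_save_progress_counter() and rcu_implicit_dynticks_qs().
 */
static bool __maybe_unused example_cpu_passed_through_eqs(struct rcu_data *rdp)
{
	int snap = rcu_dynticks_snap(rdp->cpu);	/* Fully ordered snapshot. */

	if (rcu_dynticks_in_eqs(snap))
		return true;	/* In an extended quiescent state right now. */
	/* ... let some time elapse ... */
	return rcu_dynticks_in_eqs_since(rdp, snap); /* Passed through one since? */
}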
316 * Let the RCU core know that this CPU has gone through the scheduler,
317 * which is a quiescent state. This is called when the need for a
318 * quiescent state is urgent, so we burn an atomic operation and full
319 * memory barriers to let the RCU core know about it, regardless of what
320 * this CPU might (or might not) do in the near future.
322 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
324 * The caller must have disabled interrupts and must not be idle.
326 notrace void rcu_momentary_dyntick_idle(void)
330 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
331 seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
332 /* It is illegal to call this from idle state. */
333 WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
334 rcu_preempt_deferred_qs(current);
336 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
339 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
341 * If the current CPU is idle and running at a first-level (not nested)
342 * interrupt, or directly from idle, return true.
344 * The caller must have at least disabled IRQs.
346 static int rcu_is_cpu_rrupt_from_idle(void)
351 * Usually called from the tick; but also used from smp_call_function()
352 * for expedited grace periods. This latter can result in running from
353 * the idle task, instead of an actual IPI.
355 lockdep_assert_irqs_disabled();
357 /* Check for counter underflows */
358 RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
359 "RCU dynticks_nesting counter underflow!");
360 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
361 "RCU dynticks_nmi_nesting counter underflow/zero!");
363 /* Are we at first interrupt nesting level? */
364 nesting = ct_dynticks_nmi_nesting();
369 * If we're not in an interrupt, we must be in the idle task!
371 WARN_ON_ONCE(!nesting && !is_idle_task(current));
373 /* Does CPU appear to be idle from an RCU standpoint? */
374 return ct_dynticks_nesting() == 0;
377 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
378 // Maximum callbacks per rcu_do_batch ...
379 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
380 static long blimit = DEFAULT_RCU_BLIMIT;
381 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
382 static long qhimark = DEFAULT_RCU_QHIMARK;
383 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
384 static long qlowmark = DEFAULT_RCU_QLOMARK;
385 #define DEFAULT_RCU_QOVLD_MULT 2
386 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
387 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
388 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
390 module_param(blimit, long, 0444);
391 module_param(qhimark, long, 0444);
392 module_param(qlowmark, long, 0444);
393 module_param(qovld, long, 0444);
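/*
 * Roughly how these knobs interact at their default values: rcu_do_batch()
 * normally invokes at most blimit (10) callbacks per pass.  Once a CPU has
 * more than qhimark (10,000) callbacks pending, blimit is ignored (though
 * no more than DEFAULT_MAX_RCU_BLIMIT (10,000) callbacks are invoked per
 * pass), and blimit is honored again once the backlog drops below qlowmark
 * (100).  At qovld (20,000) pending callbacks, RCU additionally becomes
 * much more aggressive about forcing quiescent states.
 */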
395 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
396 static ulong jiffies_till_next_fqs = ULONG_MAX;
397 static bool rcu_kick_kthreads;
398 static int rcu_divisor = 7;
399 module_param(rcu_divisor, int, 0644);
401 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
402 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
403 module_param(rcu_resched_ns, long, 0644);
406 * How long the grace period must be before we start recruiting
407 * quiescent-state help from rcu_note_context_switch().
409 static ulong jiffies_till_sched_qs = ULONG_MAX;
410 module_param(jiffies_till_sched_qs, ulong, 0444);
411 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
412 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
415 * Make sure that we give the grace-period kthread time to detect any
416 * idle CPUs before taking active measures to force quiescent states.
417 * However, don't go below 100 milliseconds, adjusted upwards for really
418 * large systems.
420 static void adjust_jiffies_till_sched_qs(void)
424 /* If jiffies_till_sched_qs was specified, respect the request. */
425 if (jiffies_till_sched_qs != ULONG_MAX) {
426 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
427 return;
428 }
429 /* Otherwise, set to third fqs scan, but bound below on large systems. */
430 j = READ_ONCE(jiffies_till_first_fqs) +
431 2 * READ_ONCE(jiffies_till_next_fqs);
432 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
433 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
434 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
435 WRITE_ONCE(jiffies_to_sched_qs, j);
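/*
 * Worked example (values assumed purely for illustration): with HZ = 250,
 * jiffies_till_first_fqs = 1, jiffies_till_next_fqs = 1, nr_cpu_ids = 64,
 * and RCU_JIFFIES_FQS_DIV = 256, the initial estimate is 1 + 2 * 1 = 3
 * jiffies, but the lower bound is HZ / 10 + 64 / 256 = 25 + 0 = 25, so
 * jiffies_to_sched_qs ends up as 25 jiffies (100 milliseconds).
 */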
438 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
441 int ret = kstrtoul(val, 0, &j);
444 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
445 adjust_jiffies_till_sched_qs();
450 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
453 int ret = kstrtoul(val, 0, &j);
456 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
457 adjust_jiffies_till_sched_qs();
462 static const struct kernel_param_ops first_fqs_jiffies_ops = {
463 .set = param_set_first_fqs_jiffies,
464 .get = param_get_ulong,
467 static const struct kernel_param_ops next_fqs_jiffies_ops = {
468 .set = param_set_next_fqs_jiffies,
469 .get = param_get_ulong,
472 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
473 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
474 module_param(rcu_kick_kthreads, bool, 0644);
476 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
477 static int rcu_pending(int user);
480 * Return the number of RCU GPs completed thus far for debug & stats.
482 unsigned long rcu_get_gp_seq(void)
484 return READ_ONCE(rcu_state.gp_seq);
486 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
489 * Return the number of RCU expedited batches completed thus far for
490 * debug & stats. Odd numbers mean that a batch is in progress, even
491 * numbers mean idle. The value returned will thus be roughly double
492 * the cumulative batches since boot.
494 unsigned long rcu_exp_batches_completed(void)
496 return rcu_state.expedited_sequence;
498 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
501 * Return the root node of the rcu_state structure.
503 static struct rcu_node *rcu_get_root(void)
505 return &rcu_state.node[0];
509 * Send along grace-period-related data for rcutorture diagnostics.
511 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
512 unsigned long *gp_seq)
516 *flags = READ_ONCE(rcu_state.gp_flags);
517 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
523 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
525 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
527 * An empty function that will trigger a reschedule on
528 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
530 static void late_wakeup_func(struct irq_work *work)
534 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
535 IRQ_WORK_INIT(late_wakeup_func);
540 * If either: (1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work,
541 * or (2) the task is about to enter user mode and $ARCH doesn't support generic entry,
543 * then late RCU wakeups aren't supported in the resched loops and our
544 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
545 * get re-enabled again.
547 noinstr void rcu_irq_work_resched(void)
549 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
551 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
554 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
557 instrumentation_begin();
558 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
559 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
561 instrumentation_end();
563 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
565 #ifdef CONFIG_PROVE_RCU
567 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
569 void rcu_irq_exit_check_preempt(void)
571 lockdep_assert_irqs_disabled();
573 RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
574 "RCU dynticks_nesting counter underflow/zero!");
575 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
577 "Bad RCU dynticks_nmi_nesting counter\n");
578 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
579 "RCU in extended quiescent state!");
581 #endif /* #ifdef CONFIG_PROVE_RCU */
583 #ifdef CONFIG_NO_HZ_FULL
585 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
587 * The scheduler tick is not normally enabled when CPUs enter the kernel
588 * from nohz_full userspace execution. After all, nohz_full userspace
589 * execution is an RCU quiescent state and the time executing in the kernel
590 * is quite short. Except of course when it isn't. And it is not hard to
591 * cause a large system to spend tens of seconds or even minutes looping
592 * in the kernel, which can cause a number of problems, including RCU CPU
593 * stall warnings.
595 * Therefore, if a nohz_full CPU fails to report a quiescent state
596 * in a timely manner, the RCU grace-period kthread sets that CPU's
597 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
598 * exception will invoke this function, which will turn on the scheduler
599 * tick, which will enable RCU to detect that CPU's quiescent states,
600 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
601 * The tick will be disabled once a quiescent state is reported for
602 * this CPU.
604 * Of course, in carefully tuned systems, there might never be an
605 * interrupt or exception. In that case, the RCU grace-period kthread
606 * will eventually cause one to happen. However, in less carefully
607 * controlled environments, this function allows RCU to get what it
608 * needs without creating otherwise useless interruptions.
610 void __rcu_irq_enter_check_tick(void)
612 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
614 // If we're here from NMI there's nothing to do.
618 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
619 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
621 if (!tick_nohz_full_cpu(rdp->cpu) ||
622 !READ_ONCE(rdp->rcu_urgent_qs) ||
623 READ_ONCE(rdp->rcu_forced_tick)) {
624 // RCU doesn't need nohz_full help from this CPU, or it is
625 // already getting that help.
629 // We get here only when not in an extended quiescent state and
630 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
631 // already watching and (2) The fact that we are in an interrupt
632 // handler and that the rcu_node lock is an irq-disabled lock
633 // prevents self-deadlock. So we can safely recheck under the lock.
634 // Note that the nohz_full state currently cannot change.
635 raw_spin_lock_rcu_node(rdp->mynode);
636 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
637 // A nohz_full CPU is in the kernel and RCU needs a
638 // quiescent state. Turn on the tick!
639 WRITE_ONCE(rdp->rcu_forced_tick, true);
640 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
642 raw_spin_unlock_rcu_node(rdp->mynode);
644 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
645 #endif /* CONFIG_NO_HZ_FULL */
648 * Check to see if any future non-offloaded RCU-related work will need
649 * to be done by the current CPU, even if none need be done immediately,
650 * returning 1 if so. This function is part of the RCU implementation;
651 * it is -not- an exported member of the RCU API. This is used by
652 * the idle-entry code to figure out whether it is safe to disable the
653 * scheduler-clock interrupt.
655 * Just check whether or not this CPU has non-offloaded RCU callbacks
656 * queued.
658 int rcu_needs_cpu(void)
660 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
661 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
665 * If any sort of urgency was applied to the current CPU (for example,
666 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
667 * to get to a quiescent state, disable it.
669 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
671 raw_lockdep_assert_held_rcu_node(rdp->mynode);
672 WRITE_ONCE(rdp->rcu_urgent_qs, false);
673 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
674 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
675 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
676 WRITE_ONCE(rdp->rcu_forced_tick, false);
681 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
683 * Return @true if RCU is watching the running CPU and @false otherwise.
684 * A @true return means that this CPU can safely enter RCU read-side
685 * critical sections.
687 * Although calls to rcu_is_watching() from most parts of the kernel
688 * will return @true, there are important exceptions. For example, if the
689 * current CPU is deep within its idle loop, in kernel entry/exit code,
690 * or offline, rcu_is_watching() will return @false.
692 * Make notrace because it can be called by the internal functions of
693 * ftrace, and making this notrace removes unnecessary recursion calls.
695 notrace bool rcu_is_watching(void)
699 preempt_disable_notrace();
700 ret = !rcu_dynticks_curr_cpu_in_eqs();
701 preempt_enable_notrace();
704 EXPORT_SYMBOL_GPL(rcu_is_watching);
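/*
 * Illustrative usage sketch (hypothetical function, not part of RCU):
 * code that might run from the idle loop, from early entry/exit paths,
 * or on an offline CPU can consult rcu_is_watching() to decide whether
 * RCU read-side access is currently safe.
 */
static void __maybe_unused example_maybe_read_protected_data(void)
{
	if (!rcu_is_watching())
		return;		/* RCU is not watching: skip the lookup. */
	rcu_read_lock();
	/* ... dereference RCU-protected pointers here ... */
	rcu_read_unlock();
}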
707 * If a holdout task is actually running, request an urgent quiescent
708 * state from its CPU. This is unsynchronized, so migrations can cause
709 * the request to go to the wrong CPU. Which is OK, all that will happen
710 * is that the CPU's next context switch will be a bit slower and next
711 * time around this task will generate another request.
713 void rcu_request_urgent_qs_task(struct task_struct *t)
720 return; /* This task is not running on that CPU. */
721 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
725 * When trying to report a quiescent state on behalf of some other CPU,
726 * it is our responsibility to check for and handle potential overflow
727 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
728 * After all, the CPU might be in deep idle state, and thus executing no
729 * code whatsoever.
731 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
733 raw_lockdep_assert_held_rcu_node(rnp);
734 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
736 WRITE_ONCE(rdp->gpwrap, true);
737 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
738 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
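/*
 * In other words: if this CPU's rcu_data ->gp_seq has fallen more than
 * ULONG_MAX / 4 sequence counts behind the rcu_node structure's ->gp_seq
 * (possible only if the CPU was idle or offline for a very long time),
 * set ->gpwrap so that later comparisons treat the rcu_data state as
 * stale instead of trusting a counter that may have wrapped.
 */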
742 * Snapshot the specified CPU's dynticks counter so that we can later
743 * credit them with an implicit quiescent state. Return 1 if this CPU
744 * is in dynticks idle mode, which is an extended quiescent state.
746 static int dyntick_save_progress_counter(struct rcu_data *rdp)
748 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
749 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
750 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
751 rcu_gpnum_ovf(rdp->mynode, rdp);
758 * Return true if the specified CPU has passed through a quiescent
759 * state by virtue of being in or having passed through a dynticks
760 * idle state since the last call to dyntick_save_progress_counter()
761 * for this same CPU, or by virtue of having been offline.
763 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
766 struct rcu_node *rnp = rdp->mynode;
769 * If the CPU passed through or entered a dynticks idle phase with
770 * no active irq/NMI handlers, then we can safely pretend that the CPU
771 * already acknowledged the request to pass through a quiescent
772 * state. Either way, that CPU cannot possibly be in an RCU
773 * read-side critical section that started before the beginning
774 * of the current RCU grace period.
776 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
777 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
778 rcu_gpnum_ovf(rnp, rdp);
783 * Complain if a CPU that is considered to be offline from RCU's
784 * perspective has not yet reported a quiescent state. After all,
785 * the offline CPU should have reported a quiescent state during
786 * the CPU-offline process, or, failing that, by rcu_gp_init()
787 * if it ran concurrently with either the CPU going offline or the
788 * last task on a leaf rcu_node structure exiting its RCU read-side
789 * critical section while all CPUs corresponding to that structure
790 * are offline. This added warning detects bugs in any of these
791 * code paths.
793 * The rcu_node structure's ->lock is held here, which excludes
794 * the relevant portions of the CPU-hotplug code, the grace-period
795 * initialization code, and the rcu_read_unlock() code paths.
797 * For more detail, please refer to the "Hotplug CPU" section
798 * of RCU's Requirements documentation.
800 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
801 struct rcu_node *rnp1;
803 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
804 __func__, rnp->grplo, rnp->grphi, rnp->level,
805 (long)rnp->gp_seq, (long)rnp->completedqs);
806 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
807 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
808 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
809 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
810 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
811 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
812 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
813 return 1; /* Break things loose after complaining. */
817 * A CPU running for an extended time within the kernel can
818 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
819 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
820 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
821 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
822 * variable are safe because the assignments are repeated if this
823 * CPU failed to pass through a quiescent state. This code
824 * also checks .jiffies_resched in case jiffies_to_sched_qs
825 * is set way high.
827 jtsq = READ_ONCE(jiffies_to_sched_qs);
828 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
829 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
830 time_after(jiffies, rcu_state.jiffies_resched) ||
832 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
833 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
834 smp_store_release(&rdp->rcu_urgent_qs, true);
835 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
836 WRITE_ONCE(rdp->rcu_urgent_qs, true);
840 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
841 * The above code handles this, but only for straight cond_resched().
842 * And some in-kernel loops check need_resched() before calling
843 * cond_resched(), which defeats the above code for CPUs that are
844 * running in-kernel with scheduling-clock interrupts disabled.
845 * So hit them over the head with the resched_cpu() hammer!
847 if (tick_nohz_full_cpu(rdp->cpu) &&
848 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
850 WRITE_ONCE(rdp->rcu_urgent_qs, true);
851 resched_cpu(rdp->cpu);
852 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
856 * If more than halfway to RCU CPU stall-warning time, invoke
857 * resched_cpu() more frequently to try to loosen things up a bit.
858 * Also check to see if the CPU is getting hammered with interrupts,
859 * but only once per grace period, just to keep the IPIs down to
860 * a dull roar.
862 if (time_after(jiffies, rcu_state.jiffies_resched)) {
863 if (time_after(jiffies,
864 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
865 resched_cpu(rdp->cpu);
866 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
868 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
869 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
870 (rnp->ffmask & rdp->grpmask)) {
871 rdp->rcu_iw_pending = true;
872 rdp->rcu_iw_gp_seq = rnp->gp_seq;
873 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
876 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
878 struct rcu_snap_record *rsrp;
879 struct kernel_cpustat *kcsp;
881 kcsp = &kcpustat_cpu(cpu);
883 rsrp = &rdp->snap_record;
884 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
885 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
886 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
887 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
888 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
889 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
890 rsrp->jiffies = jiffies;
891 rsrp->gp_seq = rdp->gp_seq;
898 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
899 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
900 unsigned long gp_seq_req, const char *s)
902 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
903 gp_seq_req, rnp->level,
904 rnp->grplo, rnp->grphi, s);
908 * rcu_start_this_gp - Request the start of a particular grace period
909 * @rnp_start: The leaf node of the CPU from which to start.
910 * @rdp: The rcu_data corresponding to the CPU from which to start.
911 * @gp_seq_req: The gp_seq of the grace period to start.
913 * Start the specified grace period, as needed to handle newly arrived
914 * callbacks. The required future grace periods are recorded in each
915 * rcu_node structure's ->gp_seq_needed field.
918 * The caller must hold the specified rcu_node structure's ->lock, which
919 * is why the caller is responsible for waking the grace-period kthread.
921 * Returns true if the grace-period kthread needs to be awakened, else false.
923 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
924 unsigned long gp_seq_req)
927 struct rcu_node *rnp;
930 * Use funnel locking to either acquire the root rcu_node
931 * structure's lock or bail out if the need for this grace period
932 * has already been recorded -- or if that grace period has in
933 * fact already started. If there is already a grace period in
934 * progress in a non-leaf node, no recording is needed because the
935 * end of the grace period will scan the leaf rcu_node structures.
936 * Note that rnp_start->lock must not be released.
938 raw_lockdep_assert_held_rcu_node(rnp_start);
939 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
940 for (rnp = rnp_start; 1; rnp = rnp->parent) {
941 if (rnp != rnp_start)
942 raw_spin_lock_rcu_node(rnp);
943 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
944 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
946 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
947 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
951 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
952 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
954 * We just marked the leaf or internal node, and a
955 * grace period is in progress, which means that
956 * rcu_gp_cleanup() will see the marking. Bail to
957 * reduce contention.
959 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
963 if (rnp != rnp_start && rnp->parent != NULL)
964 raw_spin_unlock_rcu_node(rnp);
966 break; /* At root, and perhaps also leaf. */
969 /* If GP already in progress, just leave, otherwise start one. */
970 if (rcu_gp_in_progress()) {
971 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
974 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
975 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
976 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
977 if (!READ_ONCE(rcu_state.gp_kthread)) {
978 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
981 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
982 ret = true; /* Caller must wake GP kthread. */
984 /* Push furthest requested GP to leaf node and rcu_data structure. */
985 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
986 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
987 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
989 if (rnp != rnp_start)
990 raw_spin_unlock_rcu_node(rnp);
995 * Clean up any old requests for the just-ended grace period. Also return
996 * whether any additional grace periods have been requested.
998 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1001 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1003 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1005 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1006 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1007 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1012 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1013 * interrupt or softirq handler, in which case we just might immediately
1014 * sleep upon return, resulting in a grace-period hang), and don't bother
1015 * awakening when there is nothing for the grace-period kthread to do
1016 * (as in several CPUs raced to awaken, we lost), and finally don't try
1017 * to awaken a kthread that has not yet been created. If all those checks
1018 * are passed, track some debug information and awaken.
1020 * So why do the self-wakeup when in an interrupt or softirq handler
1021 * in the grace-period kthread's context? Because the kthread might have
1022 * been interrupted just as it was going to sleep, and just after the final
1023 * pre-sleep check of the awaken condition. In this case, a wakeup really
1024 * is required, and is therefore supplied.
1026 static void rcu_gp_kthread_wake(void)
1028 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1030 if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1031 !READ_ONCE(rcu_state.gp_flags) || !t)
1033 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1034 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1035 swake_up_one(&rcu_state.gp_wq);
1039 * If there is room, assign a ->gp_seq number to any callbacks on this
1040 * CPU that have not already been assigned. Also accelerate any callbacks
1041 * that were previously assigned a ->gp_seq number that has since proven
1042 * to be too conservative, which can happen if callbacks get assigned a
1043 * ->gp_seq number while RCU is idle, but with reference to a non-root
1044 * rcu_node structure. This function is idempotent, so it does not hurt
1045 * to call it repeatedly. Returns a flag saying that we should awaken
1046 * the RCU grace-period kthread.
1048 * The caller must hold rnp->lock with interrupts disabled.
1050 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1052 unsigned long gp_seq_req;
1055 rcu_lockdep_assert_cblist_protected(rdp);
1056 raw_lockdep_assert_held_rcu_node(rnp);
1058 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1059 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1062 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1065 * Callbacks are often registered with incomplete grace-period
1066 * information. Something about the fact that getting exact
1067 * information requires acquiring a global lock... RCU therefore
1068 * makes a conservative estimate of the grace period number at which
1069 * a given callback will become ready to invoke. The following
1070 * code checks this estimate and improves it when possible, thus
1071 * accelerating callback invocation to an earlier grace-period
1072 * number.
1074 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1075 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1076 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1078 /* Trace depending on how much we were able to accelerate. */
1079 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1080 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1082 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1084 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1090 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1091 * rcu_node structure's ->lock be held. It consults the cached value
1092 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1093 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1094 * while holding the leaf rcu_node structure's ->lock.
1096 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1097 struct rcu_data *rdp)
1102 rcu_lockdep_assert_cblist_protected(rdp);
1103 c = rcu_seq_snap(&rcu_state.gp_seq);
1104 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1105 /* Old request still live, so mark recent callbacks. */
1106 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1109 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1110 needwake = rcu_accelerate_cbs(rnp, rdp);
1111 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1113 rcu_gp_kthread_wake();
1117 * Move any callbacks whose grace period has completed to the
1118 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1119 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1120 * sublist. This function is idempotent, so it does not hurt to
1121 * invoke it repeatedly. As long as it is not invoked -too- often...
1122 * Returns true if the RCU grace-period kthread needs to be awakened.
1124 * The caller must hold rnp->lock with interrupts disabled.
1126 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1128 rcu_lockdep_assert_cblist_protected(rdp);
1129 raw_lockdep_assert_held_rcu_node(rnp);
1131 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1132 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1136 * Find all callbacks whose ->gp_seq numbers indicate that they
1137 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1139 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1141 /* Classify any remaining callbacks. */
1142 return rcu_accelerate_cbs(rnp, rdp);
1146 * Move and classify callbacks, but only if doing so won't require
1147 * that the RCU grace-period kthread be awakened.
1149 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1150 struct rcu_data *rdp)
1152 rcu_lockdep_assert_cblist_protected(rdp);
1153 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1155 // The grace period cannot end while we hold the rcu_node lock.
1156 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1157 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1158 raw_spin_unlock_rcu_node(rnp);
1162 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1163 * quiescent state. This is intended to be invoked when the CPU notices
1164 * a new grace period.
1166 static void rcu_strict_gp_check_qs(void)
1168 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1175 * Update CPU-local rcu_data state to record the beginnings and ends of
1176 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1177 * structure corresponding to the current CPU, and must have irqs disabled.
1178 * Returns true if the grace-period kthread needs to be awakened.
1180 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1184 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1186 raw_lockdep_assert_held_rcu_node(rnp);
1188 if (rdp->gp_seq == rnp->gp_seq)
1189 return false; /* Nothing to do. */
1191 /* Handle the ends of any preceding grace periods first. */
1192 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1193 unlikely(READ_ONCE(rdp->gpwrap))) {
1195 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1196 rdp->core_needs_qs = false;
1197 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1200 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1201 if (rdp->core_needs_qs)
1202 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1205 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1206 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1207 unlikely(READ_ONCE(rdp->gpwrap))) {
1209 * If the current grace period is waiting for this CPU,
1210 * set up to detect a quiescent state, otherwise don't
1211 * go looking for one.
1213 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1214 need_qs = !!(rnp->qsmask & rdp->grpmask);
1215 rdp->cpu_no_qs.b.norm = need_qs;
1216 rdp->core_needs_qs = need_qs;
1217 zero_cpu_stall_ticks(rdp);
1219 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1220 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1221 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1222 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1223 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1224 WRITE_ONCE(rdp->gpwrap, false);
1225 rcu_gpnum_ovf(rnp, rdp);
1229 static void note_gp_changes(struct rcu_data *rdp)
1231 unsigned long flags;
1233 struct rcu_node *rnp;
1235 local_irq_save(flags);
1237 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1238 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1239 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1240 local_irq_restore(flags);
1243 needwake = __note_gp_changes(rnp, rdp);
1244 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1245 rcu_strict_gp_check_qs();
1247 rcu_gp_kthread_wake();
1250 static atomic_t *rcu_gp_slow_suppress;
1252 /* Register a counter to suppress debugging grace-period delays. */
1253 void rcu_gp_slow_register(atomic_t *rgssp)
1255 WARN_ON_ONCE(rcu_gp_slow_suppress);
1257 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1259 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1261 /* Unregister a counter, with NULL meaning the caller does not care which counter is unregistered. */
1262 void rcu_gp_slow_unregister(atomic_t *rgssp)
1264 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1266 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1268 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1270 static bool rcu_gp_slow_is_suppressed(void)
1272 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1274 return rgssp && atomic_read(rgssp);
1277 static void rcu_gp_slow(int delay)
1279 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1280 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1281 schedule_timeout_idle(delay);
1284 static unsigned long sleep_duration;
1286 /* Allow rcutorture to stall the grace-period kthread. */
1287 void rcu_gp_set_torture_wait(int duration)
1289 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1290 WRITE_ONCE(sleep_duration, duration);
1292 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1294 /* Actually implement the aforementioned wait. */
1295 static void rcu_gp_torture_wait(void)
1297 unsigned long duration;
1299 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1301 duration = xchg(&sleep_duration, 0UL);
1303 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1304 schedule_timeout_idle(duration);
1305 pr_alert("%s: Wait complete\n", __func__);
1310 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1311 * processing.
1313 static void rcu_strict_gp_boundary(void *unused)
1318 // Make the polled API aware of the beginning of a grace period.
1319 static void rcu_poll_gp_seq_start(unsigned long *snap)
1321 struct rcu_node *rnp = rcu_get_root();
1323 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1324 raw_lockdep_assert_held_rcu_node(rnp);
1326 // If RCU was idle, note beginning of GP.
1327 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1328 rcu_seq_start(&rcu_state.gp_seq_polled);
1330 // Either way, record current state.
1331 *snap = rcu_state.gp_seq_polled;
1334 // Make the polled API aware of the end of a grace period.
1335 static void rcu_poll_gp_seq_end(unsigned long *snap)
1337 struct rcu_node *rnp = rcu_get_root();
1339 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1340 raw_lockdep_assert_held_rcu_node(rnp);
1342 // If the previously noted GP is still in effect, record the
1343 // end of that GP. Either way, zero counter to avoid counter-wrap
1344 // problems.
1345 if (*snap && *snap == rcu_state.gp_seq_polled) {
1346 rcu_seq_end(&rcu_state.gp_seq_polled);
1347 rcu_state.gp_seq_polled_snap = 0;
1348 rcu_state.gp_seq_polled_exp_snap = 0;
1354 // Make the polled API aware of the beginning of a grace period, but
1355 // where caller does not hold the root rcu_node structure's lock.
1356 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1358 unsigned long flags;
1359 struct rcu_node *rnp = rcu_get_root();
1361 if (rcu_init_invoked()) {
1362 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1363 lockdep_assert_irqs_enabled();
1364 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1366 rcu_poll_gp_seq_start(snap);
1367 if (rcu_init_invoked())
1368 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1371 // Make the polled API aware of the end of a grace period, but where
1372 // caller does not hold the root rcu_node structure's lock.
1373 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1375 unsigned long flags;
1376 struct rcu_node *rnp = rcu_get_root();
1378 if (rcu_init_invoked()) {
1379 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1380 lockdep_assert_irqs_enabled();
1381 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1383 rcu_poll_gp_seq_end(snap);
1384 if (rcu_init_invoked())
1385 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1389 * Initialize a new grace period. Return false if no grace period required.
1391 static noinline_for_stack bool rcu_gp_init(void)
1393 unsigned long flags;
1394 unsigned long oldmask;
1396 struct rcu_data *rdp;
1397 struct rcu_node *rnp = rcu_get_root();
1399 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1400 raw_spin_lock_irq_rcu_node(rnp);
1401 if (!READ_ONCE(rcu_state.gp_flags)) {
1402 /* Spurious wakeup, tell caller to go back to sleep. */
1403 raw_spin_unlock_irq_rcu_node(rnp);
1406 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1408 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1410 * Grace period already in progress, don't start another.
1411 * Not supposed to be able to happen.
1413 raw_spin_unlock_irq_rcu_node(rnp);
1417 /* Advance to a new grace period and initialize state. */
1418 record_gp_stall_check_time();
1419 /* Record GP times before starting GP, hence rcu_seq_start(). */
1420 rcu_seq_start(&rcu_state.gp_seq);
1421 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1422 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1423 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1424 raw_spin_unlock_irq_rcu_node(rnp);
1427 * Apply per-leaf buffered online and offline operations to
1428 * the rcu_node tree. Note that this new grace period need not
1429 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1430 * offlining path, when combined with checks in this function,
1431 * will handle CPUs that are currently going offline or that will
1432 * go offline later. Please also refer to "Hotplug CPU" section
1433 * of RCU's Requirements documentation.
1435 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1436 /* Exclude CPU hotplug operations. */
1437 rcu_for_each_leaf_node(rnp) {
1438 local_irq_save(flags);
1439 arch_spin_lock(&rcu_state.ofl_lock);
1440 raw_spin_lock_rcu_node(rnp);
1441 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1442 !rnp->wait_blkd_tasks) {
1443 /* Nothing to do on this leaf rcu_node structure. */
1444 raw_spin_unlock_rcu_node(rnp);
1445 arch_spin_unlock(&rcu_state.ofl_lock);
1446 local_irq_restore(flags);
1450 /* Record old state, apply changes to ->qsmaskinit field. */
1451 oldmask = rnp->qsmaskinit;
1452 rnp->qsmaskinit = rnp->qsmaskinitnext;
1454 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1455 if (!oldmask != !rnp->qsmaskinit) {
1456 if (!oldmask) { /* First online CPU for rcu_node. */
1457 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1458 rcu_init_new_rnp(rnp);
1459 } else if (rcu_preempt_has_tasks(rnp)) {
1460 rnp->wait_blkd_tasks = true; /* blocked tasks */
1461 } else { /* Last offline CPU and can propagate. */
1462 rcu_cleanup_dead_rnp(rnp);
1467 * If all waited-on tasks from prior grace period are
1468 * done, and if all this rcu_node structure's CPUs are
1469 * still offline, propagate up the rcu_node tree and
1470 * clear ->wait_blkd_tasks. Otherwise, if one of this
1471 * rcu_node structure's CPUs has since come back online,
1472 * simply clear ->wait_blkd_tasks.
1474 if (rnp->wait_blkd_tasks &&
1475 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1476 rnp->wait_blkd_tasks = false;
1477 if (!rnp->qsmaskinit)
1478 rcu_cleanup_dead_rnp(rnp);
1481 raw_spin_unlock_rcu_node(rnp);
1482 arch_spin_unlock(&rcu_state.ofl_lock);
1483 local_irq_restore(flags);
1485 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1488 * Set the quiescent-state-needed bits in all the rcu_node
1489 * structures for all currently online CPUs in breadth-first
1490 * order, starting from the root rcu_node structure, relying on the
1491 * layout of the tree within the rcu_state.node[] array. Note that
1492 * other CPUs will access only the leaves of the hierarchy, thus
1493 * seeing that no grace period is in progress, at least until the
1494 * corresponding leaf node has been initialized.
1496 * The grace period cannot complete until the initialization
1497 * process finishes, because this kthread handles both.
1499 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1500 rcu_for_each_node_breadth_first(rnp) {
1501 rcu_gp_slow(gp_init_delay);
1502 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1503 rdp = this_cpu_ptr(&rcu_data);
1504 rcu_preempt_check_blocked_tasks(rnp);
1505 rnp->qsmask = rnp->qsmaskinit;
1506 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1507 if (rnp == rdp->mynode)
1508 (void)__note_gp_changes(rnp, rdp);
1509 rcu_preempt_boost_start_gp(rnp);
1510 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1511 rnp->level, rnp->grplo,
1512 rnp->grphi, rnp->qsmask);
1513 /* Quiescent states for tasks on any now-offline CPUs. */
1514 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1515 rnp->rcu_gp_init_mask = mask;
1516 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1517 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1519 raw_spin_unlock_irq_rcu_node(rnp);
1520 cond_resched_tasks_rcu_qs();
1521 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1524 // If strict, make all CPUs aware of new grace period.
1525 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1526 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1532 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1533 * time.
1535 static bool rcu_gp_fqs_check_wake(int *gfp)
1537 struct rcu_node *rnp = rcu_get_root();
1539 // If under overload conditions, force an immediate FQS scan.
1540 if (*gfp & RCU_GP_FLAG_OVLD)
1543 // Someone like call_rcu() requested a force-quiescent-state scan.
1544 *gfp = READ_ONCE(rcu_state.gp_flags);
1545 if (*gfp & RCU_GP_FLAG_FQS)
1548 // The current grace period has completed.
1549 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1556 * Do one round of quiescent-state forcing.
1558 static void rcu_gp_fqs(bool first_time)
1560 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1561 struct rcu_node *rnp = rcu_get_root();
1563 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1564 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1566 WARN_ON_ONCE(nr_fqs > 3);
1567 /* Only count down nr_fqs for stall purposes if jiffies moves. */
1570 WRITE_ONCE(rcu_state.jiffies_stall,
1571 jiffies + rcu_jiffies_till_stall_check());
1573 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1577 /* Collect dyntick-idle snapshots. */
1578 force_qs_rnp(dyntick_save_progress_counter);
1580 /* Handle dyntick-idle and offline CPUs. */
1581 force_qs_rnp(rcu_implicit_dynticks_qs);
1583 /* Clear flag to prevent immediate re-entry. */
1584 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1585 raw_spin_lock_irq_rcu_node(rnp);
1586 WRITE_ONCE(rcu_state.gp_flags,
1587 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1588 raw_spin_unlock_irq_rcu_node(rnp);
1593 * Loop doing repeated quiescent-state forcing until the grace period ends.
1595 static noinline_for_stack void rcu_gp_fqs_loop(void)
1597 bool first_gp_fqs = true;
1601 struct rcu_node *rnp = rcu_get_root();
1603 j = READ_ONCE(jiffies_till_first_fqs);
1604 if (rcu_state.cbovld)
1605 gf = RCU_GP_FLAG_OVLD;
1608 if (rcu_state.cbovld) {
1613 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1614 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1616 * Ensure jiffies_force_qs is updated before the RCU_GP_WAIT_FQS state
1617 * update; this ordering is required for stall checks.
1620 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1621 jiffies + (j ? 3 * j : 2));
1623 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1625 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1626 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1627 rcu_gp_fqs_check_wake(&gf), j);
1628 rcu_gp_torture_wait();
1629 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1630 /* Locking provides needed memory barriers. */
1632 * Exit the loop if the root rcu_node structure indicates that the grace period
1633 * has ended. The rcu_preempt_blocked_readers_cgp(rnp) check
1634 * is required only for single-node rcu_node trees because readers blocking
1635 * the current grace period are queued only on leaf rcu_node structures.
1636 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1637 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1638 * the corresponding leaf nodes have passed through their quiescent state.
1640 if (!READ_ONCE(rnp->qsmask) &&
1641 !rcu_preempt_blocked_readers_cgp(rnp))
1643 /* If time for quiescent-state forcing, do it. */
1644 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1645 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1646 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1648 rcu_gp_fqs(first_gp_fqs);
1651 first_gp_fqs = false;
1652 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1654 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1656 cond_resched_tasks_rcu_qs();
1657 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1658 ret = 0; /* Force full wait till next FQS. */
1659 j = READ_ONCE(jiffies_till_next_fqs);
1661 /* Deal with stray signal. */
1662 cond_resched_tasks_rcu_qs();
1663 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1664 WARN_ON(signal_pending(current));
1665 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1667 ret = 1; /* Keep old FQS timing. */
1669 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1672 j = rcu_state.jiffies_force_qs - j;
1679 * Clean up after the old grace period.
1681 static noinline void rcu_gp_cleanup(void)
1684 bool needgp = false;
1685 unsigned long gp_duration;
1686 unsigned long new_gp_seq;
1688 struct rcu_data *rdp;
1689 struct rcu_node *rnp = rcu_get_root();
1690 struct swait_queue_head *sq;
1692 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1693 raw_spin_lock_irq_rcu_node(rnp);
1694 rcu_state.gp_end = jiffies;
1695 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1696 if (gp_duration > rcu_state.gp_max)
1697 rcu_state.gp_max = gp_duration;
1700 * We know the grace period is complete, but to everyone else
1701 * it appears to still be ongoing. But it is also the case
1702 * that to everyone else it looks like there is nothing that
1703 * they can do to advance the grace period. It is therefore
1704 * safe for us to drop the lock in order to mark the grace
1705 * period as completed in all of the rcu_node structures.
1707 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1708 raw_spin_unlock_irq_rcu_node(rnp);
1711 * Propagate new ->gp_seq value to rcu_node structures so that
1712 * other CPUs don't have to wait until the start of the next grace
1713 * period to process their callbacks. This also avoids some nasty
1714 * RCU grace-period initialization races by forcing the end of
1715 * the current grace period to be completely recorded in all of
1716 * the rcu_node structures before the beginning of the next grace
1717 * period is recorded in any of the rcu_node structures.
1719 new_gp_seq = rcu_state.gp_seq;
1720 rcu_seq_end(&new_gp_seq);
1721 rcu_for_each_node_breadth_first(rnp) {
1722 raw_spin_lock_irq_rcu_node(rnp);
1723 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1724 dump_blkd_tasks(rnp, 10);
1725 WARN_ON_ONCE(rnp->qsmask);
1726 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1728 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1729 rdp = this_cpu_ptr(&rcu_data);
1730 if (rnp == rdp->mynode)
1731 needgp = __note_gp_changes(rnp, rdp) || needgp;
1732 /* smp_mb() provided by prior unlock-lock pair. */
1733 needgp = rcu_future_gp_cleanup(rnp) || needgp;
1734 // Reset overload indication for CPUs no longer overloaded
1735 if (rcu_is_leaf_node(rnp))
1736 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1737 rdp = per_cpu_ptr(&rcu_data, cpu);
1738 check_cb_ovld_locked(rdp, rnp);
1740 sq = rcu_nocb_gp_get(rnp);
1741 raw_spin_unlock_irq_rcu_node(rnp);
1742 rcu_nocb_gp_cleanup(sq);
1743 cond_resched_tasks_rcu_qs();
1744 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1745 rcu_gp_slow(gp_cleanup_delay);
1747 rnp = rcu_get_root();
1748 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1750 /* Declare grace period done, trace first to use old GP number. */
1751 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1752 rcu_seq_end(&rcu_state.gp_seq);
1753 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1754 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1755 /* Check for GP requests since above loop. */
1756 rdp = this_cpu_ptr(&rcu_data);
1757 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1758 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1759 TPS("CleanupMore"));
1762 /* Advance CBs to reduce false positives below. */
1763 offloaded = rcu_rdp_is_offloaded(rdp);
1764 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1766 // We get here if a grace period was needed ("needgp")
1767 // and the above call to rcu_accelerate_cbs() did not set
1768 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
1769 // the need for another grace period). The purpose
1770 // of the "offloaded" check is to avoid invoking
1771 // rcu_accelerate_cbs() on an offloaded CPU because we do not
1772 // hold the ->nocb_lock needed to safely access an offloaded
1773 // ->cblist. We do not want to acquire that lock because
1774 // it can be heavily contended during callback floods.
1776 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1777 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1778 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1781 // We get here either if there is no need for an
1782 // additional grace period or if rcu_accelerate_cbs() has
1783 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
1784 // So all we need to do is to clear all of the other bits in ->gp_flags.
1787 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1789 raw_spin_unlock_irq_rcu_node(rnp);
1791 // If strict, make all CPUs aware of the end of the old grace period.
1792 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1793 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1797 * Body of kthread that handles grace periods.
1799 static int __noreturn rcu_gp_kthread(void *unused)
1801 rcu_bind_gp_kthread();
1804 /* Handle grace-period start. */
1806 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1808 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1809 swait_event_idle_exclusive(rcu_state.gp_wq,
1810 READ_ONCE(rcu_state.gp_flags) &
1812 rcu_gp_torture_wait();
1813 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1814 /* Locking provides needed memory barrier. */
1817 cond_resched_tasks_rcu_qs();
1818 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1819 WARN_ON(signal_pending(current));
1820 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1824 /* Handle quiescent-state forcing. */
1827 /* Handle grace-period end. */
1828 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1830 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
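/*
 * To summarize the loop above: the grace-period kthread cycles through
 * RCU_GP_WAIT_GPS (sleeping until a grace period is requested),
 * grace-period initialization, the RCU_GP_WAIT_FQS/RCU_GP_DOING_FQS
 * forcing loop in rcu_gp_fqs_loop(), and finally RCU_GP_CLEANUP and
 * RCU_GP_CLEANED, after which it waits for the next request.
 */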
1835 * Report a full set of quiescent states to the rcu_state data structure.
1836 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1837 * another grace period is required. Whether we wake the grace-period
1838 * kthread or it awakens itself for the next round of quiescent-state
1839 * forcing, that kthread will clean up after the just-completed grace
1840 * period. Note that the caller must hold rnp->lock, which is released before return.
1843 static void rcu_report_qs_rsp(unsigned long flags)
1844 __releases(rcu_get_root()->lock)
1846 raw_lockdep_assert_held_rcu_node(rcu_get_root());
1847 WARN_ON_ONCE(!rcu_gp_in_progress());
1848 WRITE_ONCE(rcu_state.gp_flags,
1849 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1850 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1851 rcu_gp_kthread_wake();
1855 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1856 * Allows quiescent states for a group of CPUs to be reported at one go
1857 * to the specified rcu_node structure, though all the CPUs in the group
1858 * must be represented by the same rcu_node structure (which need not be a
1859 * leaf rcu_node structure, though it often will be). The gps parameter
1860 * is the grace-period snapshot, which means that the quiescent states
1861 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
1862 * must be held upon entry, and it is released before return.
1864 * As a special case, if mask is zero, the bit-already-cleared check is
1865 * disabled. This allows propagating quiescent state due to resumed tasks
1866 * during grace-period initialization.
1868 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1869 unsigned long gps, unsigned long flags)
1870 __releases(rnp->lock)
1872 unsigned long oldmask = 0;
1873 struct rcu_node *rnp_c;
1875 raw_lockdep_assert_held_rcu_node(rnp);
1877 /* Walk up the rcu_node hierarchy. */
1879 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1882 * Our bit has already been cleared, or the
1883 * relevant grace period is already over, so done.
1885 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1888 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1889 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1890 rcu_preempt_blocked_readers_cgp(rnp));
1891 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1892 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1893 mask, rnp->qsmask, rnp->level,
1894 rnp->grplo, rnp->grphi,
1896 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1898 /* Other bits still set at this level, so done. */
1899 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1902 rnp->completedqs = rnp->gp_seq;
1903 mask = rnp->grpmask;
1904 if (rnp->parent == NULL) {
1906 /* No more levels. Exit loop holding root lock. */
1910 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1913 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1914 oldmask = READ_ONCE(rnp_c->qsmask);
1918 * Get here if we are the last CPU to pass through a quiescent
1919 * state for this grace period. Invoke rcu_report_qs_rsp()
1920 * to clean up and start the next grace period if one is needed.
1922 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
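/*
 * Illustrative walk-through of the loop above (fanout values are
 * hypothetical): with a leaf fanout of 16, a leaf rcu_node covers CPUs
 * 0-15. When the last of those CPUs reports, the leaf's ->qsmask drops
 * to zero, so the leaf's ->grpmask bit is cleared in its parent's
 * ->qsmask, and so on level by level. Once the root's ->qsmask reaches
 * zero, rcu_report_qs_rsp() above ends the grace period.
 */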
1926 * Record a quiescent state for all tasks that were previously queued
1927 * on the specified rcu_node structure and that were blocking the current
1928 * RCU grace period. The caller must hold the corresponding rnp->lock with
1929 * irqs disabled, and this lock is released upon return, but irqs remain disabled.
1932 static void __maybe_unused
1933 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1934 __releases(rnp->lock)
1938 struct rcu_node *rnp_p;
1940 raw_lockdep_assert_held_rcu_node(rnp);
1941 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1942 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1944 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1945 return; /* Still need more quiescent states! */
1948 rnp->completedqs = rnp->gp_seq;
1949 rnp_p = rnp->parent;
1950 if (rnp_p == NULL) {
1952 * Only one rcu_node structure in the tree, so don't
1953 * try to report up to its nonexistent parent!
1955 rcu_report_qs_rsp(flags);
1959 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1961 mask = rnp->grpmask;
1962 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1963 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
1964 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1968 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1969 * structure. This must be called from the specified CPU.
1972 rcu_report_qs_rdp(struct rcu_data *rdp)
1974 unsigned long flags;
1976 bool needacc = false;
1977 struct rcu_node *rnp;
1979 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
1981 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1982 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
1986 * The grace period in which this quiescent state was
1987 * recorded has ended, so don't report it upwards.
1988 * We will instead need a new quiescent state that lies
1989 * within the current grace period.
1991 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
1992 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1995 mask = rdp->grpmask;
1996 rdp->core_needs_qs = false;
1997 if ((rnp->qsmask & mask) == 0) {
1998 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2001 * This GP can't end until cpu checks in, so all of our
2002 * callbacks can be processed during the next GP.
2004 * NOCB kthreads have their own way to deal with that...
2006 if (!rcu_rdp_is_offloaded(rdp)) {
2008 * The current GP has not yet ended, so it
2009 * should not be possible for rcu_accelerate_cbs()
2010 * to return true. So complain, but don't awaken.
2012 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2013 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2015 * ...but NOCB kthreads may miss or delay callbacks acceleration
2016 * if in the middle of a (de-)offloading process.
2021 rcu_disable_urgency_upon_qs(rdp);
2022 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2023 /* ^^^ Released rnp->lock */
2026 rcu_nocb_lock_irqsave(rdp, flags);
2027 rcu_accelerate_cbs_unlocked(rnp, rdp);
2028 rcu_nocb_unlock_irqrestore(rdp, flags);
2034 * Check to see if there is a new grace period of which this CPU
2035 * is not yet aware, and if so, set up local rcu_data state for it.
2036 * Otherwise, see if this CPU has just passed through its first
2037 * quiescent state for this grace period, and record that fact if so.
2040 rcu_check_quiescent_state(struct rcu_data *rdp)
2042 /* Check for grace-period ends and beginnings. */
2043 note_gp_changes(rdp);
2046 * Does this CPU still need to do its part for current grace period?
2047 * If no, return and let the other CPUs do their part as well.
2049 if (!rdp->core_needs_qs)
2053 * Was there a quiescent state since the beginning of the grace
2054 * period? If no, then exit and wait for the next call.
2056 if (rdp->cpu_no_qs.b.norm)
2060 * Tell RCU we are done (but rcu_report_qs_rdp() will be the judge of that).
2063 rcu_report_qs_rdp(rdp);
2066 /* Return true if callback-invocation time limit exceeded. */
2067 static bool rcu_do_batch_check_time(long count, long tlimit,
2068 bool jlimit_check, unsigned long jlimit)
2070 // Invoke local_clock() only once per 32 consecutive callbacks.
2071 return unlikely(tlimit) &&
2072 (!likely(count & 31) ||
2073 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2074 jlimit_check && time_after(jiffies, jlimit))) &&
2075 local_clock() >= tlimit;
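/*
 * Reading the condition above: the relatively expensive local_clock()
 * call is consulted only when a time limit is in force, and then only
 * once per 32 callbacks, unless CONFIG_RCU_DOUBLE_CHECK_CB_TIME also
 * permits a cheaper jiffies-based comparison to trigger the check early.
 */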
2079 * Invoke any RCU callbacks that have made it to the end of their grace
2080 * period. Throttle as specified by rdp->blimit.
2082 static void rcu_do_batch(struct rcu_data *rdp)
2087 bool __maybe_unused empty;
2088 unsigned long flags;
2089 unsigned long jlimit;
2090 bool jlimit_check = false;
2092 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2093 struct rcu_head *rhp;
2096 /* If no callbacks are ready, just return. */
2097 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2098 trace_rcu_batch_start(rcu_state.name,
2099 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2100 trace_rcu_batch_end(rcu_state.name, 0,
2101 !rcu_segcblist_empty(&rdp->cblist),
2102 need_resched(), is_idle_task(current),
2103 rcu_is_callbacks_kthread(rdp));
2108 * Extract the list of ready callbacks, disabling IRQs to prevent
2109 * races with call_rcu() from interrupt handlers. Leave the
2110 * callback counts, as rcu_barrier() needs to be conservative.
2112 rcu_nocb_lock_irqsave(rdp, flags);
2113 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2114 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2115 div = READ_ONCE(rcu_divisor);
2116 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2117 bl = max(rdp->blimit, pending >> div);
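/*
 * Worked example for the batch limit above (numbers are illustrative):
 * assuming a divisor of 7, a backlog of 12800 ready callbacks gives
 * pending >> 7 = 100, so bl becomes max(rdp->blimit, 100) and at least
 * 100 callbacks may be invoked in this batch.
 */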
2118 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2119 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2120 const long npj = NSEC_PER_SEC / HZ;
2121 long rrn = READ_ONCE(rcu_resched_ns);
2123 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2124 tlimit = local_clock() + rrn;
2125 jlimit = jiffies + (rrn + npj + 1) / npj;
2126 jlimit_check = true;
2128 trace_rcu_batch_start(rcu_state.name,
2129 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2130 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2131 if (rcu_rdp_is_offloaded(rdp))
2132 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2134 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2135 rcu_nocb_unlock_irqrestore(rdp, flags);
2137 /* Invoke callbacks. */
2138 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2139 rhp = rcu_cblist_dequeue(&rcl);
2141 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2145 debug_rcu_head_unqueue(rhp);
2147 rcu_lock_acquire(&rcu_callback_map);
2148 trace_rcu_invoke_callback(rcu_state.name, rhp);
2151 debug_rcu_head_callback(rhp);
2152 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2155 rcu_lock_release(&rcu_callback_map);
2158 * Stop only if limit reached and CPU has something to do.
2160 if (in_serving_softirq()) {
2161 if (count >= bl && (need_resched() || !is_idle_task(current)))
2164 * Make sure we don't spend too much time here and deprive other
2165 * softirq vectors of CPU cycles.
2167 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2170 // In rcuc/rcuoc context, so no worries about
2171 // depriving other softirq vectors of CPU cycles.
2173 lockdep_assert_irqs_enabled();
2174 cond_resched_tasks_rcu_qs();
2175 lockdep_assert_irqs_enabled();
2177 // But rcuc kthreads can delay quiescent-state
2178 // reporting, so check time limits for them.
2179 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2180 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2181 rdp->rcu_cpu_has_work = 1;
2187 rcu_nocb_lock_irqsave(rdp, flags);
2188 rdp->n_cbs_invoked += count;
2189 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2190 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2192 /* Update counts and requeue any remaining callbacks. */
2193 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2194 rcu_segcblist_add_len(&rdp->cblist, -count);
2196 /* Reinstate batch limit if we have worked down the excess. */
2197 count = rcu_segcblist_n_cbs(&rdp->cblist);
2198 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2199 rdp->blimit = blimit;
2201 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2202 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2203 rdp->qlen_last_fqs_check = 0;
2204 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2205 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2206 rdp->qlen_last_fqs_check = count;
2209 * The following usually indicates a double call_rcu(). To track
2210 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2212 empty = rcu_segcblist_empty(&rdp->cblist);
2213 WARN_ON_ONCE(count == 0 && !empty);
2214 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2215 count != 0 && empty);
2216 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2217 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2219 rcu_nocb_unlock_irqrestore(rdp, flags);
2221 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2225 * This function is invoked from each scheduling-clock interrupt,
2226 * and checks to see if this CPU is in a non-context-switch quiescent
2227 * state, for example, user mode or idle loop. It also schedules RCU
2228 * core processing. If the current grace period has gone on too long,
2229 * it will ask the scheduler to manufacture a context switch for the sole
2230 * purpose of providing the needed quiescent state.
2232 void rcu_sched_clock_irq(int user)
2236 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2238 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2239 __this_cpu_write(rcu_data.last_sched_clock, j);
2241 trace_rcu_utilization(TPS("Start scheduler-tick"));
2242 lockdep_assert_irqs_disabled();
2243 raw_cpu_inc(rcu_data.ticks_this_gp);
2244 /* The load-acquire pairs with the store-release setting to true. */
2245 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2246 /* Idle and userspace execution already are quiescent states. */
2247 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2248 set_tsk_need_resched(current);
2249 set_preempt_need_resched();
2251 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2253 rcu_flavor_sched_clock_irq(user);
2254 if (rcu_pending(user))
2256 if (user || rcu_is_cpu_rrupt_from_idle())
2257 rcu_note_voluntary_context_switch(current);
2258 lockdep_assert_irqs_disabled();
2260 trace_rcu_utilization(TPS("End scheduler-tick"));
2264 * Scan the leaf rcu_node structures. For each structure on which all
2265 * CPUs have reported a quiescent state and on which there are tasks
2266 * blocking the current grace period, initiate RCU priority boosting.
2267 * Otherwise, invoke the specified function to check dyntick state for
2268 * each CPU that has not yet reported a quiescent state.
2270 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2273 unsigned long flags;
2275 struct rcu_data *rdp;
2276 struct rcu_node *rnp;
2278 rcu_state.cbovld = rcu_state.cbovldnext;
2279 rcu_state.cbovldnext = false;
2280 rcu_for_each_leaf_node(rnp) {
2281 cond_resched_tasks_rcu_qs();
2283 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2284 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2285 if (rnp->qsmask == 0) {
2286 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2288 * No point in scanning bits because they
2289 * are all zero. But we might need to
2290 * priority-boost blocked readers.
2292 rcu_initiate_boost(rnp, flags);
2293 /* rcu_initiate_boost() releases rnp->lock */
2296 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2299 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2300 rdp = per_cpu_ptr(&rcu_data, cpu);
2302 mask |= rdp->grpmask;
2303 rcu_disable_urgency_upon_qs(rdp);
2307 /* Idle/offline CPUs, report (releases rnp->lock). */
2308 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2310 /* Nothing to do here, so just drop the lock. */
2311 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2317 * Force quiescent states on reluctant CPUs, and also detect which
2318 * CPUs are in dyntick-idle mode.
2320 void rcu_force_quiescent_state(void)
2322 unsigned long flags;
2324 struct rcu_node *rnp;
2325 struct rcu_node *rnp_old = NULL;
2327 /* Funnel through hierarchy to reduce memory contention. */
2328 rnp = raw_cpu_read(rcu_data.mynode);
2329 for (; rnp != NULL; rnp = rnp->parent) {
2330 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2331 !raw_spin_trylock(&rnp->fqslock);
2332 if (rnp_old != NULL)
2333 raw_spin_unlock(&rnp_old->fqslock);
2338 /* rnp_old == rcu_get_root(), rnp == NULL. */
2340 /* Reached the root of the rcu_node tree, acquire lock. */
2341 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2342 raw_spin_unlock(&rnp_old->fqslock);
2343 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2344 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2345 return; /* Someone beat us to it. */
2347 WRITE_ONCE(rcu_state.gp_flags,
2348 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2349 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2350 rcu_gp_kthread_wake();
2352 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
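/*
 * A note on the funnel locking above: each caller starts at its own leaf
 * rcu_node structure and trylocks ->fqslock on the way up, releasing the
 * previously held ->fqslock at each level. A caller that loses a trylock
 * race, or that sees RCU_GP_FLAG_FQS already set, simply gives up, so at
 * most one contending caller reaches the root and sets RCU_GP_FLAG_FQS.
 */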
2354 // Workqueue handler for an RCU reader for kernels enforcing strict RCU grace periods.
2356 static void strict_work_handler(struct work_struct *work)
2362 /* Perform RCU core processing work for the current CPU. */
2363 static __latent_entropy void rcu_core(void)
2365 unsigned long flags;
2366 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2367 struct rcu_node *rnp = rdp->mynode;
2369 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2370 * Therefore this function can race with concurrent NOCB (de-)offloading
2371 * on this CPU and the below condition must be considered volatile.
2372 * However if we race with:
2374 * _ Offloading: In the worst case we accelerate or process callbacks
2375 * concurrently with NOCB kthreads. We are guaranteed to
2376 * call rcu_nocb_lock() if that happens.
2378 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2379 * processing. This is fine because the early stage
2380 * of deoffloading invokes rcu_core() after setting
2381 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2382 * what could have been dismissed without the need to wait
2383 * for the next rcu_pending() check in the next jiffy.
2385 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2387 if (cpu_is_offline(smp_processor_id()))
2389 trace_rcu_utilization(TPS("Start RCU core"));
2390 WARN_ON_ONCE(!rdp->beenonline);
2392 /* Report any deferred quiescent states if preemption enabled. */
2393 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2394 rcu_preempt_deferred_qs(current);
2395 } else if (rcu_preempt_need_deferred_qs(current)) {
2396 set_tsk_need_resched(current);
2397 set_preempt_need_resched();
2400 /* Update RCU state based on any recent quiescent states. */
2401 rcu_check_quiescent_state(rdp);
2403 /* No grace period and unregistered callbacks? */
2404 if (!rcu_gp_in_progress() &&
2405 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2406 rcu_nocb_lock_irqsave(rdp, flags);
2407 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2408 rcu_accelerate_cbs_unlocked(rnp, rdp);
2409 rcu_nocb_unlock_irqrestore(rdp, flags);
2412 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2414 /* If there are callbacks ready, invoke them. */
2415 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2416 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2418 /* Re-invoke RCU core processing if there are callbacks remaining. */
2419 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2423 /* Do any needed deferred wakeups of rcuo kthreads. */
2424 do_nocb_deferred_wakeup(rdp);
2425 trace_rcu_utilization(TPS("End RCU core"));
2427 // If strict GPs, schedule an RCU reader in a clean environment.
2428 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2429 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2432 static void rcu_core_si(struct softirq_action *h)
2437 static void rcu_wake_cond(struct task_struct *t, int status)
2440 * If the thread is yielding, only wake it when this
2441 * is invoked from idle
2443 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2447 static void invoke_rcu_core_kthread(void)
2449 struct task_struct *t;
2450 unsigned long flags;
2452 local_irq_save(flags);
2453 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2454 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2455 if (t != NULL && t != current)
2456 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2457 local_irq_restore(flags);
2461 * Wake up this CPU's rcuc kthread to do RCU core processing.
2463 static void invoke_rcu_core(void)
2465 if (!cpu_online(smp_processor_id()))
2468 raise_softirq(RCU_SOFTIRQ);
2470 invoke_rcu_core_kthread();
2473 static void rcu_cpu_kthread_park(unsigned int cpu)
2475 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2478 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2480 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2484 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2485 * the RCU softirq used in configurations of RCU that do not support RCU
2486 * priority boosting.
2488 static void rcu_cpu_kthread(unsigned int cpu)
2490 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2491 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2492 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2495 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2496 for (spincnt = 0; spincnt < 10; spincnt++) {
2497 WRITE_ONCE(*j, jiffies);
2499 *statusp = RCU_KTHREAD_RUNNING;
2500 local_irq_disable();
2502 WRITE_ONCE(*workp, 0);
2507 if (!READ_ONCE(*workp)) {
2508 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2509 *statusp = RCU_KTHREAD_WAITING;
2513 *statusp = RCU_KTHREAD_YIELDING;
2514 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2515 schedule_timeout_idle(2);
2516 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2517 *statusp = RCU_KTHREAD_WAITING;
2518 WRITE_ONCE(*j, jiffies);
2521 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2522 .store = &rcu_data.rcu_cpu_kthread_task,
2523 .thread_should_run = rcu_cpu_kthread_should_run,
2524 .thread_fn = rcu_cpu_kthread,
2525 .thread_comm = "rcuc/%u",
2526 .setup = rcu_cpu_kthread_setup,
2527 .park = rcu_cpu_kthread_park,
2531 * Spawn per-CPU RCU core processing kthreads.
2533 static int __init rcu_spawn_core_kthreads(void)
2537 for_each_possible_cpu(cpu)
2538 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2541 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2542 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2547 * Handle any core-RCU processing required by a call_rcu() invocation.
2549 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2550 unsigned long flags)
2553 * If called from an extended quiescent state, invoke the RCU
2554 * core in order to force a re-evaluation of RCU's idleness.
2556 if (!rcu_is_watching())
2559 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2560 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2564 * Force the grace period if too many callbacks or too long waiting.
2565 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2566 * if some other CPU has recently done so. Also, don't bother
2567 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2568 * is the only one waiting for a grace period to complete.
2570 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2571 rdp->qlen_last_fqs_check + qhimark)) {
2573 /* Are we ignoring a completed grace period? */
2574 note_gp_changes(rdp);
2576 /* Start a new grace period if one not already started. */
2577 if (!rcu_gp_in_progress()) {
2578 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2580 /* Give the grace period a kick. */
2581 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2582 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2583 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2584 rcu_force_quiescent_state();
2585 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2586 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2592 * RCU callback function to leak a callback.
2594 static void rcu_leak_callback(struct rcu_head *rhp)
2599 * Check and if necessary update the leaf rcu_node structure's
2600 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2601 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2602 * structure's ->lock.
2604 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2606 raw_lockdep_assert_held_rcu_node(rnp);
2607 if (qovld_calc <= 0)
2608 return; // Early boot and wildcard value set.
2609 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2610 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2612 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2616 * Check and if necessary update the leaf rcu_node structure's
2617 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2618 * number of queued RCU callbacks. No locks need be held, but the
2619 * caller must have disabled interrupts.
2621 * Note that this function ignores the possibility that there are a lot
2622 * of callbacks all of which have already seen the end of their respective
2623 * grace periods. This omission is due to the need for no-CBs CPUs to
2624 * be holding ->nocb_lock to do this check, which is too heavy for a
2625 * common-case operation.
2627 static void check_cb_ovld(struct rcu_data *rdp)
2629 struct rcu_node *const rnp = rdp->mynode;
2631 if (qovld_calc <= 0 ||
2632 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2633 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2634 return; // Early boot wildcard value or already set correctly.
2635 raw_spin_lock_rcu_node(rnp);
2636 check_cb_ovld_locked(rdp, rnp);
2637 raw_spin_unlock_rcu_node(rnp);
2641 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
2643 static atomic_t doublefrees;
2644 unsigned long flags;
2646 struct rcu_data *rdp;
2649 /* Misaligned rcu_head! */
2650 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2652 if (debug_rcu_head_queue(head)) {
2654 * Probable double call_rcu(), so leak the callback.
2655 * Use rcu:rcu_callback trace event to find the previous
2656 * time callback was passed to call_rcu().
2658 if (atomic_inc_return(&doublefrees) < 4) {
2659 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
2662 WRITE_ONCE(head->func, rcu_leak_callback);
2667 kasan_record_aux_stack_noalloc(head);
2668 local_irq_save(flags);
2669 rdp = this_cpu_ptr(&rcu_data);
2670 lazy = lazy_in && !rcu_async_should_hurry();
2672 /* Add the callback to our list. */
2673 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2674 // This can trigger due to call_rcu() from offline CPU:
2675 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2676 WARN_ON_ONCE(!rcu_is_watching());
2677 // Very early boot, before rcu_init(). Initialize if needed
2678 // and then drop through to queue the callback.
2679 if (rcu_segcblist_empty(&rdp->cblist))
2680 rcu_segcblist_init(&rdp->cblist);
2684 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2685 return; // Enqueued onto ->nocb_bypass, so just leave.
2686 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2687 rcu_segcblist_enqueue(&rdp->cblist, head);
2688 if (__is_kvfree_rcu_offset((unsigned long)func))
2689 trace_rcu_kvfree_callback(rcu_state.name, head,
2690 (unsigned long)func,
2691 rcu_segcblist_n_cbs(&rdp->cblist));
2693 trace_rcu_callback(rcu_state.name, head,
2694 rcu_segcblist_n_cbs(&rdp->cblist));
2696 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2698 /* Go handle any RCU core processing required. */
2699 if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2700 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2702 __call_rcu_core(rdp, head, flags);
2703 local_irq_restore(flags);
2707 #ifdef CONFIG_RCU_LAZY
2709 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2710 * flush all lazy callbacks (including the new one) to the main ->cblist while doing so.
2713 * @head: structure to be used for queueing the RCU updates.
2714 * @func: actual callback function to be invoked after the grace period
2716 * The callback function will be invoked some time after a full grace
2717 * period elapses, in other words after all pre-existing RCU read-side
2718 * critical sections have completed.
2720 * Use this API instead of call_rcu() if you don't want the callback to be
2721 * invoked after very long periods of time, which can happen on systems without
2722 * memory pressure and on systems which are lightly loaded or mostly idle.
2723 * This function will cause callbacks to be invoked sooner than later at the
2724 * expense of extra power. Other than that, this function is identical to, and
2725 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2726 * ordering and other functionality.
2728 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2730 __call_rcu_common(head, func, false);
2732 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2736 * call_rcu() - Queue an RCU callback for invocation after a grace period.
2737 * By default the callbacks are 'lazy' and are kept hidden from the main
2738 * ->cblist to prevent starting of grace periods too soon.
2739 * If you desire grace periods to start very soon, use call_rcu_hurry().
2741 * @head: structure to be used for queueing the RCU updates.
2742 * @func: actual callback function to be invoked after the grace period
2744 * The callback function will be invoked some time after a full grace
2745 * period elapses, in other words after all pre-existing RCU read-side
2746 * critical sections have completed. However, the callback function
2747 * might well execute concurrently with RCU read-side critical sections
2748 * that started after call_rcu() was invoked.
2750 * RCU read-side critical sections are delimited by rcu_read_lock()
2751 * and rcu_read_unlock(), and may be nested. In addition, but only in
2752 * v5.0 and later, regions of code across which interrupts, preemption,
2753 * or softirqs have been disabled also serve as RCU read-side critical
2754 * sections. This includes hardware interrupt handlers, softirq handlers, and NMI handlers.
2757 * Note that all CPUs must agree that the grace period extended beyond
2758 * all pre-existing RCU read-side critical sections. On systems with more
2759 * than one CPU, this means that when "func()" is invoked, each CPU is
2760 * guaranteed to have executed a full memory barrier since the end of its
2761 * last RCU read-side critical section whose beginning preceded the call
2762 * to call_rcu(). It also means that each CPU executing an RCU read-side
2763 * critical section that continues beyond the start of "func()" must have
2764 * executed a memory barrier after the call_rcu() but before the beginning
2765 * of that RCU read-side critical section. Note that these guarantees
2766 * include CPUs that are offline, idle, or executing in user mode, as
2767 * well as CPUs that are executing in the kernel.
2769 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2770 * resulting RCU callback function "func()", then both CPU A and CPU B are
2771 * guaranteed to execute a full memory barrier during the time interval
2772 * between the call to call_rcu() and the invocation of "func()" -- even
2773 * if CPU A and CPU B are the same CPU (but again only if the system has
2774 * more than one CPU).
2776 * Implementation of these memory-ordering guarantees is described here:
2777 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2779 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2781 __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2783 EXPORT_SYMBOL_GPL(call_rcu);
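/*
 * Example (illustrative only; struct foo and its helpers are
 * hypothetical): a typical caller embeds an rcu_head in its own
 * structure, unlinks the structure under the update-side lock, and
 * frees it from the callback once a grace period has elapsed.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_retire(struct foo *fp)	// update-side lock held
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 *
 * Callers that cannot tolerate the default laziness can use
 * call_rcu_hurry() in place of call_rcu().
 */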
2785 /* Maximum number of jiffies to wait before draining a batch. */
2786 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2787 #define KFREE_N_BATCHES 2
2788 #define FREE_N_CHANNELS 2
2791 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2792 * @list: List node. All blocks are linked between each other
2793 * @gp_snap: Snapshot of RCU state for objects placed to this bulk
2794 * @nr_records: Number of active pointers in the array
2795 * @records: Array of the kvfree_rcu() pointers
2797 struct kvfree_rcu_bulk_data {
2798 struct list_head list;
2799 struct rcu_gp_oldstate gp_snap;
2800 unsigned long nr_records;
2805 * This macro defines how many entries the "records" array
2806 * will contain. It is chosen so that the size of a
2807 * kvfree_rcu_bulk_data structure is exactly one page.
2809 #define KVFREE_BULK_MAX_ENTR \
2810 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
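/*
 * Illustrative arithmetic (page and pointer sizes are assumptions): with
 * 4 KiB pages and 8-byte pointers this works out to
 * (4096 - sizeof(struct kvfree_rcu_bulk_data)) / 8, i.e. roughly 500
 * pointer slots per block.
 */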
2813 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2814 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2815 * @head_free: List of kfree_rcu() objects waiting for a grace period
2816 * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
2817 * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2818 * @krcp: Pointer to @kfree_rcu_cpu structure
2821 struct kfree_rcu_cpu_work {
2822 struct rcu_work rcu_work;
2823 struct rcu_head *head_free;
2824 struct rcu_gp_oldstate head_free_gp_snap;
2825 struct list_head bulk_head_free[FREE_N_CHANNELS];
2826 struct kfree_rcu_cpu *krcp;
2830 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2831 * @head: List of kfree_rcu() objects not yet waiting for a grace period
2832 * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
2833 * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2834 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2835 * @lock: Synchronize access to this structure
2836 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2837 * @initialized: The @rcu_work fields have been initialized
2838 * @head_count: Number of objects in rcu_head singular list
2839 * @bulk_count: Number of objects in bulk-list
2841 * @bkvcache: A simple cache list that contains objects for reuse.
2842 * In order to save some per-cpu space the list is singular.
2843 * Even though it is lockless, access has to be protected by the per-CPU @lock.
2845 * @page_cache_work: A work to refill the cache when it is empty
2846 * @backoff_page_cache_fill: Delay cache refills
2847 * @work_in_progress: Indicates that page_cache_work is running
2848 * @hrtimer: A hrtimer for scheduling a page_cache_work
2849 * @nr_bkv_objs: number of allocated objects at @bkvcache.
2851 * This is a per-CPU structure. The reason that it is not included in
2852 * the rcu_data structure is to permit this code to be extracted from
2853 * the RCU files. Such extraction could allow further optimization of
2854 * the interactions with the slab allocators.
2856 struct kfree_rcu_cpu {
2857 // Objects queued on a linked list
2858 // through their rcu_head structures.
2859 struct rcu_head *head;
2860 unsigned long head_gp_snap;
2861 atomic_t head_count;
2863 // Objects queued on a bulk-list.
2864 struct list_head bulk_head[FREE_N_CHANNELS];
2865 atomic_t bulk_count[FREE_N_CHANNELS];
2867 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2868 raw_spinlock_t lock;
2869 struct delayed_work monitor_work;
2872 struct delayed_work page_cache_work;
2873 atomic_t backoff_page_cache_fill;
2874 atomic_t work_in_progress;
2875 struct hrtimer hrtimer;
2877 struct llist_head bkvcache;
2881 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2882 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2885 static __always_inline void
2886 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2888 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2891 for (i = 0; i < bhead->nr_records; i++)
2892 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2896 static inline struct kfree_rcu_cpu *
2897 krc_this_cpu_lock(unsigned long *flags)
2899 struct kfree_rcu_cpu *krcp;
2901 local_irq_save(*flags); // For safely calling this_cpu_ptr().
2902 krcp = this_cpu_ptr(&krc);
2903 raw_spin_lock(&krcp->lock);
2909 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2911 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2914 static inline struct kvfree_rcu_bulk_data *
2915 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2917 if (!krcp->nr_bkv_objs)
2920 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2921 return (struct kvfree_rcu_bulk_data *)
2922 llist_del_first(&krcp->bkvcache);
2926 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2927 struct kvfree_rcu_bulk_data *bnode)
2930 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2933 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2934 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2939 drain_page_cache(struct kfree_rcu_cpu *krcp)
2941 unsigned long flags;
2942 struct llist_node *page_list, *pos, *n;
2945 if (!rcu_min_cached_objs)
2948 raw_spin_lock_irqsave(&krcp->lock, flags);
2949 page_list = llist_del_all(&krcp->bkvcache);
2950 WRITE_ONCE(krcp->nr_bkv_objs, 0);
2951 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2953 llist_for_each_safe(pos, n, page_list) {
2954 free_page((unsigned long)pos);
2962 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
2963 struct kvfree_rcu_bulk_data *bnode, int idx)
2965 unsigned long flags;
2968 if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
2969 debug_rcu_bhead_unqueue(bnode);
2970 rcu_lock_acquire(&rcu_callback_map);
2971 if (idx == 0) { // kmalloc() / kfree().
2972 trace_rcu_invoke_kfree_bulk_callback(
2973 rcu_state.name, bnode->nr_records,
2976 kfree_bulk(bnode->nr_records, bnode->records);
2977 } else { // vmalloc() / vfree().
2978 for (i = 0; i < bnode->nr_records; i++) {
2979 trace_rcu_invoke_kvfree_callback(
2980 rcu_state.name, bnode->records[i], 0);
2982 vfree(bnode->records[i]);
2985 rcu_lock_release(&rcu_callback_map);
2988 raw_spin_lock_irqsave(&krcp->lock, flags);
2989 if (put_cached_bnode(krcp, bnode))
2991 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2994 free_page((unsigned long) bnode);
2996 cond_resched_tasks_rcu_qs();
3000 kvfree_rcu_list(struct rcu_head *head)
3002 struct rcu_head *next;
3004 for (; head; head = next) {
3005 void *ptr = (void *) head->func;
3006 unsigned long offset = (void *) head - ptr;
3009 debug_rcu_head_unqueue((struct rcu_head *)ptr);
3010 rcu_lock_acquire(&rcu_callback_map);
3011 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3013 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3016 rcu_lock_release(&rcu_callback_map);
3017 cond_resched_tasks_rcu_qs();
3022 * This function is invoked in workqueue context after a grace period.
3023 * It frees all the objects queued on ->bulk_head_free or ->head_free.
3025 static void kfree_rcu_work(struct work_struct *work)
3027 unsigned long flags;
3028 struct kvfree_rcu_bulk_data *bnode, *n;
3029 struct list_head bulk_head[FREE_N_CHANNELS];
3030 struct rcu_head *head;
3031 struct kfree_rcu_cpu *krcp;
3032 struct kfree_rcu_cpu_work *krwp;
3033 struct rcu_gp_oldstate head_gp_snap;
3036 krwp = container_of(to_rcu_work(work),
3037 struct kfree_rcu_cpu_work, rcu_work);
3040 raw_spin_lock_irqsave(&krcp->lock, flags);
3041 // Channels 1 and 2.
3042 for (i = 0; i < FREE_N_CHANNELS; i++)
3043 list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
3046 head = krwp->head_free;
3047 krwp->head_free = NULL;
3048 head_gp_snap = krwp->head_free_gp_snap;
3049 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3051 // Handle the first two channels.
3052 for (i = 0; i < FREE_N_CHANNELS; i++) {
3053 // Start from the tail page, so a GP is likely passed for it.
3054 list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
3055 kvfree_rcu_bulk(krcp, bnode, i);
3059 * This is used when the "bulk" path can not be used for the
3060 * double-argument of kvfree_rcu(). This happens when the
3061 * page-cache is empty, which means that objects are instead
3062 * queued on a linked list through their rcu_head structures.
3063 * This list is named "Channel 3".
3065 if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
3066 kvfree_rcu_list(head);
3070 need_offload_krc(struct kfree_rcu_cpu *krcp)
3074 for (i = 0; i < FREE_N_CHANNELS; i++)
3075 if (!list_empty(&krcp->bulk_head[i]))
3078 return !!READ_ONCE(krcp->head);
3082 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3086 for (i = 0; i < FREE_N_CHANNELS; i++)
3087 if (!list_empty(&krwp->bulk_head_free[i]))
3090 return !!krwp->head_free;
3093 static int krc_count(struct kfree_rcu_cpu *krcp)
3095 int sum = atomic_read(&krcp->head_count);
3098 for (i = 0; i < FREE_N_CHANNELS; i++)
3099 sum += atomic_read(&krcp->bulk_count[i]);
3105 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3107 long delay, delay_left;
3109 delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3110 if (delayed_work_pending(&krcp->monitor_work)) {
3111 delay_left = krcp->monitor_work.timer.expires - jiffies;
3112 if (delay < delay_left)
3113 mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3116 queue_delayed_work(system_wq, &krcp->monitor_work, delay);
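/*
 * In other words: drain after a single jiffy once at least
 * KVFREE_BULK_MAX_ENTR objects are queued, otherwise wait the full
 * KFREE_DRAIN_JIFFIES. An already-pending monitor work item is only
 * ever pulled earlier, never pushed back.
 */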
3120 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
3122 struct list_head bulk_ready[FREE_N_CHANNELS];
3123 struct kvfree_rcu_bulk_data *bnode, *n;
3124 struct rcu_head *head_ready = NULL;
3125 unsigned long flags;
3128 raw_spin_lock_irqsave(&krcp->lock, flags);
3129 for (i = 0; i < FREE_N_CHANNELS; i++) {
3130 INIT_LIST_HEAD(&bulk_ready[i]);
3132 list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
3133 if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
3136 atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
3137 list_move(&bnode->list, &bulk_ready[i]);
3141 if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
3142 head_ready = krcp->head;
3143 atomic_set(&krcp->head_count, 0);
3144 WRITE_ONCE(krcp->head, NULL);
3146 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3148 for (i = 0; i < FREE_N_CHANNELS; i++) {
3149 list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
3150 kvfree_rcu_bulk(krcp, bnode, i);
3154 kvfree_rcu_list(head_ready);
3158 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3160 static void kfree_rcu_monitor(struct work_struct *work)
3162 struct kfree_rcu_cpu *krcp = container_of(work,
3163 struct kfree_rcu_cpu, monitor_work.work);
3164 unsigned long flags;
3167 // Drain ready for reclaim.
3168 kvfree_rcu_drain_ready(krcp);
3170 raw_spin_lock_irqsave(&krcp->lock, flags);
3172 // Attempt to start a new batch.
3173 for (i = 0; i < KFREE_N_BATCHES; i++) {
3174 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3176 // Try to detach bulk_head or head and attach it, but only when
3177 // all channels are free. If any channel is not free, krwp still
3178 // has in-flight RCU work handling a previous batch.
3179 if (need_wait_for_krwp_work(krwp))
3182 // kvfree_rcu_drain_ready() might handle this krcp, if so give up.
3183 if (need_offload_krc(krcp)) {
3184 // Channel 1 corresponds to the SLAB-pointer bulk path.
3185 // Channel 2 corresponds to vmalloc-pointer bulk path.
3186 for (j = 0; j < FREE_N_CHANNELS; j++) {
3187 if (list_empty(&krwp->bulk_head_free[j])) {
3188 atomic_set(&krcp->bulk_count[j], 0);
3189 list_replace_init(&krcp->bulk_head[j],
3190 &krwp->bulk_head_free[j]);
3194 // Channel 3 corresponds to both SLAB and vmalloc
3195 // objects queued on the linked list.
3196 if (!krwp->head_free) {
3197 krwp->head_free = krcp->head;
3198 get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
3199 atomic_set(&krcp->head_count, 0);
3200 WRITE_ONCE(krcp->head, NULL);
3203 // One work item serves one batch, so a batch can cover all
3204 // three "free channels". The work may already be in the
3205 // pending state if the channels were detached one after
3206 // another by earlier invocations.
3208 queue_rcu_work(system_wq, &krwp->rcu_work);
3212 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3214 // If there is nothing left to detach, our job here is done.
3215 // If at least one channel is still busy, rearm the monitor
3216 // work to retry later, because previous batches are still
3217 // in progress.
3219 if (need_offload_krc(krcp))
3220 schedule_delayed_monitor_work(krcp);
3223 static enum hrtimer_restart
3224 schedule_page_work_fn(struct hrtimer *t)
3226 struct kfree_rcu_cpu *krcp =
3227 container_of(t, struct kfree_rcu_cpu, hrtimer);
3229 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3230 return HRTIMER_NORESTART;
3233 static void fill_page_cache_func(struct work_struct *work)
3235 struct kvfree_rcu_bulk_data *bnode;
3236 struct kfree_rcu_cpu *krcp =
3237 container_of(work, struct kfree_rcu_cpu,
3238 page_cache_work.work);
3239 unsigned long flags;
3244 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3245 1 : rcu_min_cached_objs;
3247 for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
3248 bnode = (struct kvfree_rcu_bulk_data *)
3249 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3254 raw_spin_lock_irqsave(&krcp->lock, flags);
3255 pushed = put_cached_bnode(krcp, bnode);
3256 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3259 free_page((unsigned long) bnode);
3264 atomic_set(&krcp->work_in_progress, 0);
3265 atomic_set(&krcp->backoff_page_cache_fill, 0);
3269 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3271 // If cache disabled, bail out.
3272 if (!rcu_min_cached_objs)
3275 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3276 !atomic_xchg(&krcp->work_in_progress, 1)) {
3277 if (atomic_read(&krcp->backoff_page_cache_fill)) {
3278 queue_delayed_work(system_wq,
3279 &krcp->page_cache_work,
3280 msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3282 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3283 krcp->hrtimer.function = schedule_page_work_fn;
3284 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3289 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3290 // state specified by flags. If can_alloc is true, the caller must
3291 // be schedulable and not be holding any locks or mutexes that might be
3292 // acquired by the memory allocator or anything that it might invoke.
3293 // Returns true if ptr was successfully recorded, else the caller must use a fallback.
3296 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3297 unsigned long *flags, void *ptr, bool can_alloc)
3299 struct kvfree_rcu_bulk_data *bnode;
3302 *krcp = krc_this_cpu_lock(flags);
3303 if (unlikely(!(*krcp)->initialized))
3306 idx = !!is_vmalloc_addr(ptr);
3307 bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
3308 struct kvfree_rcu_bulk_data, list);
3310 /* Check if a new block is required. */
3311 if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
3312 bnode = get_cached_bnode(*krcp);
3313 if (!bnode && can_alloc) {
3314 krc_this_cpu_unlock(*krcp, *flags);
3316 // __GFP_NORETRY - allows only a light-weight direct reclaim, which
3317 // keeps the chance of hitting the fallback path small, and forbids
3318 // invoking the OOM killer, which is also desirable since we are
3319 // about to release memory soon anyway.
3321 // __GFP_NOMEMALLOC - prevents consuming all of the memory
3322 // reserves. Please note we have a fallback path.
3324 // __GFP_NOWARN - the allocation may fail quietly under low memory
3325 // or high memory pressure; the fallback path handles that case.
3327 bnode = (struct kvfree_rcu_bulk_data *)
3328 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3329 raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3335 // Initialize the new block and attach it.
3336 bnode->nr_records = 0;
3337 list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3340 // Finally insert and update the GP for this page.
3341 bnode->records[bnode->nr_records++] = ptr;
3342 get_state_synchronize_rcu_full(&bnode->gp_snap);
3343 atomic_inc(&(*krcp)->bulk_count[idx]);
3349 * Queue a request for lazy invocation of the appropriate free routine
3350 * after a grace period. Please note that three paths are maintained,
3351 * two for the common case using arrays of pointers and a third one that
3352 * is used only when the main paths cannot be used, for example, due to memory pressure.
3355 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3356 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3357 * be freed in workqueue context. This allows requests to be batched together to
3358 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3360 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3362 unsigned long flags;
3363 struct kfree_rcu_cpu *krcp;
3367 * Please note there is a limitation for the head-less
3368 * variant, which is why there is a clear rule for such
3369 * objects: it can be used from might_sleep() context
3370 * only. For other places please embed an rcu_head in your data.
3376 // Queue the object but don't yet schedule the batch.
3377 if (debug_rcu_head_queue(ptr)) {
3378 // Probable double kfree_rcu(), just leak.
3379 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3382 // Mark as success and leave.
3386 kasan_record_aux_stack_noalloc(ptr);
3387 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3389 run_page_cache_worker(krcp);
3392 // Inline if kvfree_rcu(one_arg) call.
3396 head->next = krcp->head;
3397 WRITE_ONCE(krcp->head, head);
3398 atomic_inc(&krcp->head_count);
3400 // Take a snapshot for this krcp.
3401 krcp->head_gp_snap = get_state_synchronize_rcu();
3406 * The kvfree_rcu() caller considers the pointer freed at this point
3407 * and likely removes any references to it. Since the actual slab
3408 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3409 * this object (no scanning or false positives reporting).
3411 kmemleak_ignore(ptr);
3413 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3414 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3415 schedule_delayed_monitor_work(krcp);
3418 krc_this_cpu_unlock(krcp, flags);
3421 * Inline kvfree() after synchronize_rcu(). We can do
3422 * it from might_sleep() context only, so the current
3423 * CPU can pass the QS state.
3426 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3431 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
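/*
 * Example (illustrative only; struct bar, bp, and p are hypothetical):
 * callers normally use the kvfree_rcu()/kvfree_rcu_mightsleep() wrappers
 * rather than calling kvfree_call_rcu() directly.
 *
 *	struct bar {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Double-argument form: usable from atomic context because the
 *	// embedded rcu_head provides the fallback channel.
 *	kvfree_rcu(bp, rcu);
 *
 *	// Head-less form: sleepable context only, since it may fall back
 *	// to synchronize_rcu() followed by kvfree().
 *	kvfree_rcu_mightsleep(p);
 */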
3433 static unsigned long
3434 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3437 unsigned long count = 0;
3439 /* Snapshot count of all CPUs */
3440 for_each_possible_cpu(cpu) {
3441 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3443 count += krc_count(krcp);
3444 count += READ_ONCE(krcp->nr_bkv_objs);
3445 atomic_set(&krcp->backoff_page_cache_fill, 1);
3448 return count == 0 ? SHRINK_EMPTY : count;
3451 static unsigned long
3452 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3456 for_each_possible_cpu(cpu) {
3458 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3460 count = krc_count(krcp);
3461 count += drain_page_cache(krcp);
3462 kfree_rcu_monitor(&krcp->monitor_work.work);
3464 sc->nr_to_scan -= count;
3467 if (sc->nr_to_scan <= 0)
3471 return freed == 0 ? SHRINK_STOP : freed;
3474 void __init kfree_rcu_scheduler_running(void)
3478 for_each_possible_cpu(cpu) {
3479 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3481 if (need_offload_krc(krcp))
3482 schedule_delayed_monitor_work(krcp);
3487 * During early boot, any blocking grace-period wait automatically
3488 * implies a grace period.
3490 * Later on, this could in theory be the case for kernels built with
3491 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3492 * is not a common case. Furthermore, this optimization would cause
3493 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3494 * grace-period optimization is ignored once the scheduler is running.
3496 static int rcu_blocking_is_gp(void)
3498 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3506 * synchronize_rcu - wait until a grace period has elapsed.
3508 * Control will return to the caller some time after a full grace
3509 * period has elapsed, in other words after all currently executing RCU
3510 * read-side critical sections have completed. Note, however, that
3511 * upon return from synchronize_rcu(), the caller might well be executing
3512 * concurrently with new RCU read-side critical sections that began while
3513 * synchronize_rcu() was waiting.
3515 * RCU read-side critical sections are delimited by rcu_read_lock()
3516 * and rcu_read_unlock(), and may be nested. In addition, but only in
3517 * v5.0 and later, regions of code across which interrupts, preemption,
3518 * or softirqs have been disabled also serve as RCU read-side critical
3519 * sections. This includes hardware interrupt handlers, softirq handlers, and NMI handlers.
3522 * Note that this guarantee implies further memory-ordering guarantees.
3523 * On systems with more than one CPU, when synchronize_rcu() returns,
3524 * each CPU is guaranteed to have executed a full memory barrier since
3525 * the end of its last RCU read-side critical section whose beginning
3526 * preceded the call to synchronize_rcu(). In addition, each CPU having
3527 * an RCU read-side critical section that extends beyond the return from
3528 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3529 * after the beginning of synchronize_rcu() and before the beginning of
3530 * that RCU read-side critical section. Note that these guarantees include
3531 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3532 * that are executing in the kernel.
3534 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3535 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3536 * to have executed a full memory barrier during the execution of
3537 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3538 * again only if the system has more than one CPU).
3540 * Implementation of these memory-ordering guarantees is described here:
3541 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3543 void synchronize_rcu(void)
3545 unsigned long flags;
3546 struct rcu_node *rnp;
3548 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3549 lock_is_held(&rcu_lock_map) ||
3550 lock_is_held(&rcu_sched_lock_map),
3551 "Illegal synchronize_rcu() in RCU read-side critical section");
3552 if (!rcu_blocking_is_gp()) {
3553 if (rcu_gp_is_expedited())
3554 synchronize_rcu_expedited();
3556 wait_rcu_gp(call_rcu_hurry);
3560 // Context allows vacuous grace periods.
3561 // Note well that this code runs with !PREEMPT && !SMP.
3562 // In addition, all code that advances grace periods runs at
3563 // process level. Therefore, this normal GP overlaps with other
3564 // normal GPs only by being fully nested within them, which allows
3565 // reuse of ->gp_seq_polled_snap.
3566 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3567 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3569 // Update the normal grace-period counters to record
3570 // this grace period, but only those used by the boot CPU.
3571 * The rcu_scheduler_starting() will take care of the rest of these counters.
3573 local_irq_save(flags);
3574 WARN_ON_ONCE(num_online_cpus() > 1);
3575 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3576 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3577 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3578 local_irq_restore(flags);
3580 EXPORT_SYMBOL_GPL(synchronize_rcu);
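/*
 * Illustrative sketch, assuming a hypothetical "struct example_obj" protected
 * by RCU: the classic update-side pattern that the guarantees above enable.
 * Readers use rcu_read_lock()/rcu_dereference(); the updater publishes a new
 * version, waits out pre-existing readers, then frees the old version.
 */
struct example_obj {
	int value;
};

static int __maybe_unused example_read_value(struct example_obj __rcu **slot)
{
	int v;

	rcu_read_lock();
	v = rcu_dereference(*slot)->value;	/* Assumes *slot is never NULL. */
	rcu_read_unlock();
	return v;
}

static void __maybe_unused example_replace_obj(struct example_obj __rcu **slot,
					       struct example_obj *newp)
{
	struct example_obj *oldp;

	oldp = rcu_dereference_protected(*slot, 1);	/* Caller provides mutual exclusion. */
	rcu_assign_pointer(*slot, newp);		/* Publish the new version. */
	synchronize_rcu();				/* Wait for pre-existing readers. */
	kfree(oldp);					/* Now nobody can still reference oldp. */
}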
3583 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3584 * @rgosp: Place to put state cookie
3586 * Stores into @rgosp a value that will always be treated by functions
3587 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3588 * has already completed.
3590 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3592 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3593 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3595 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
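/*
 * Illustrative sketch with a hypothetical cache structure: seeding a stored
 * cookie with an "already completed" value so that the very first call to
 * poll_state_synchronize_rcu_full() on it returns true.
 */
struct example_gp_cache {
	struct rcu_gp_oldstate gp_state;
};

static void __maybe_unused example_gp_cache_init(struct example_gp_cache *c)
{
	get_completed_synchronize_rcu_full(&c->gp_state);
}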
3598 * get_state_synchronize_rcu - Snapshot current RCU state
3600 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3601 * or poll_state_synchronize_rcu() to determine whether or not a full
3602 * grace period has elapsed in the meantime.
3604 unsigned long get_state_synchronize_rcu(void)
3607 * Any prior manipulation of RCU-protected data must happen
3608 * before the load from ->gp_seq.
3611 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3613 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
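/*
 * Illustrative sketch (hypothetical deferred-free helpers): take a cookie when
 * an object is retired, then reclaim it later only once a full grace period
 * has elapsed, without ever blocking.
 */
struct example_deferred {
	void *data;
	unsigned long gp_cookie;
};

static void __maybe_unused example_retire(struct example_deferred *d, void *data)
{
	d->data = data;
	d->gp_cookie = get_state_synchronize_rcu();	/* Snapshot current RCU state. */
}

static bool __maybe_unused example_try_reclaim(struct example_deferred *d)
{
	if (!poll_state_synchronize_rcu(d->gp_cookie))
		return false;		/* Grace period not yet over; retry later. */
	kfree(d->data);			/* Safe: all pre-existing readers are done. */
	d->data = NULL;
	return true;
}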
3616 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3617 * @rgosp: location to place combined normal/expedited grace-period state
3619 * Places the normal and expedited grace-period states in @rgosp. This
3620 * state value can be passed to a later call to cond_synchronize_rcu_full()
3621 * or poll_state_synchronize_rcu_full() to determine whether or not a
3622 * grace period (whether normal or expedited) has elapsed in the meantime.
3623 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3624 * long, but is guaranteed to see all grace periods. In contrast, the
3625 * combined state occupies less memory, but can sometimes fail to take
3626 * grace periods into account.
3628 * This does not guarantee that the needed grace period will actually start.
3631 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3633 struct rcu_node *rnp = rcu_get_root();
3636 * Any prior manipulation of RCU-protected data must happen
3637 * before the loads from ->gp_seq and ->expedited_sequence.
3640 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3641 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3643 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3646 * Helper function for start_poll_synchronize_rcu() and
3647 * start_poll_synchronize_rcu_full().
3649 static void start_poll_synchronize_rcu_common(void)
3651 unsigned long flags;
3653 struct rcu_data *rdp;
3654 struct rcu_node *rnp;
3656 lockdep_assert_irqs_enabled();
3657 local_irq_save(flags);
3658 rdp = this_cpu_ptr(&rcu_data);
3660 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3661 // Note it is possible for a grace period to have elapsed between
3662 // the above call to get_state_synchronize_rcu() and the below call
3663 // to rcu_seq_snap. This is OK, the worst that happens is that we
3664 // get a grace period that no one needed. These accesses are ordered
3665 // by smp_mb(), and we are accessing them in the opposite order
3666 // from which they are updated at grace-period start, as required.
3667 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3668 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3670 rcu_gp_kthread_wake();
3674 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3676 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3677 * or poll_state_synchronize_rcu() to determine whether or not a full
3678 * grace period has elapsed in the meantime. If the needed grace period
3679 * is not already slated to start, notifies RCU core of the need for that grace period.
3682 * Interrupts must be enabled for the case where it is necessary to awaken
3683 * the grace-period kthread.
3685 unsigned long start_poll_synchronize_rcu(void)
3687 unsigned long gp_seq = get_state_synchronize_rcu();
3689 start_poll_synchronize_rcu_common();
3692 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
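/*
 * Illustrative sketch: unlike get_state_synchronize_rcu(), the function above
 * also nudges RCU into starting the needed grace period, so later checks are
 * more likely to succeed.  The helper below is hypothetical; note that it must
 * run with interrupts enabled.
 */
static void __maybe_unused example_start_then_wait_if_needed(void *old_data)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu();	/* Grace period is now on its way. */

	/* ... overlap other teardown work with the grace period ... */

	cond_synchronize_rcu(cookie);		/* Block only if the GP has not finished. */
	kfree(old_data);
}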
3695 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3696 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3698 * Places the normal and expedited grace-period states in @rgosp. This
3699 * state value can be passed to a later call to cond_synchronize_rcu_full()
3700 * or poll_state_synchronize_rcu_full() to determine whether or not a
3701 * grace period (whether normal or expedited) has elapsed in the meantime.
3702 * If the needed grace period is not already slated to start, notifies
3703 * RCU core of the need for that grace period.
3705 * Interrupts must be enabled for the case where it is necessary to awaken
3706 * the grace-period kthread.
3708 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3710 get_state_synchronize_rcu_full(rgosp);
3712 start_poll_synchronize_rcu_common();
3714 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3717 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3718 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3720 * If a full RCU grace period has elapsed since the earlier call from
3721 * which @oldstate was obtained, return @true, otherwise return @false.
3722 * If @false is returned, it is the caller's responsibility to invoke this
3723 * function later on until it does return @true. Alternatively, the caller
3724 * can explicitly wait for a grace period, for example, by passing @oldstate
3725 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3726 * on the one hand or by directly invoking either synchronize_rcu() or
3727 * synchronize_rcu_expedited() on the other.
3729 * Yes, this function does not take counter wrap into account.
3730 * But counter wrap is harmless. If the counter wraps, we have waited for
3731 * more than a billion grace periods (and way more on a 64-bit system!).
3732 * Those needing to keep old state values for very long time periods
3733 * (many hours even on 32-bit systems) should check them occasionally and
3734 * either refresh them or set a flag indicating that the grace period has
3735 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3736 * to get a guaranteed-completed grace-period state.
3738 * In addition, because oldstate compresses the grace-period state for
3739 * both normal and expedited grace periods into a single unsigned long,
3740 * it can miss a grace period when synchronize_rcu() runs concurrently
3741 * with synchronize_rcu_expedited(). If this is unacceptable, please
3742 * instead use the _full() variant of these polling APIs.
3744 * This function provides the same memory-ordering guarantees that
3745 * would be provided by a synchronize_rcu() that was invoked at the call
3746 * to the function that provided @oldstate, and that returned at the end of this function.
3749 bool poll_state_synchronize_rcu(unsigned long oldstate)
3751 if (oldstate == RCU_GET_STATE_COMPLETED ||
3752 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3753 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3758 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3761 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3762 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3764 * If a full RCU grace period has elapsed since the earlier call from
3765 * which *rgosp was obtained, return @true, otherwise return @false.
3766 * If @false is returned, it is the caller's responsibility to invoke this
3767 * function later on until it does return @true. Alternatively, the caller
3768 * can explicitly wait for a grace period, for example, by passing @rgosp
3769 * to cond_synchronize_rcu_full() or by directly invoking synchronize_rcu().
3771 * Yes, this function does not take counter wrap into account.
3772 * But counter wrap is harmless. If the counter wraps, we have waited
3773 * for more than a billion grace periods (and way more on a 64-bit
3774 * system!). Those needing to keep rcu_gp_oldstate values for very
3775 * long time periods (many hours even on 32-bit systems) should check
3776 * them occasionally and either refresh them or set a flag indicating
3777 * that the grace period has completed. Alternatively, they can use
3778 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3779 * grace-period state.
3781 * This function provides the same memory-ordering guarantees that would
3782 * be provided by a synchronize_rcu() that was invoked at the call to
3783 * the function that provided @rgosp, and that returned at the end of this
3784 * function. And this guarantee requires that the root rcu_node structure's
3785 * ->gp_seq field be checked instead of that of the rcu_state structure.
3786 * The problem is that the just-ending grace-period's callbacks can be
3787 * invoked between the time that the root rcu_node structure's ->gp_seq
3788 * field is updated and the time that the rcu_state structure's ->gp_seq
3789 * field is updated. Therefore, if a single synchronize_rcu() is to
3790 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3791 * then the root rcu_node structure is the one that needs to be polled.
3793 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3795 struct rcu_node *rnp = rcu_get_root();
3797 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3798 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3799 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3800 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3801 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3802 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3807 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
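/*
 * Illustrative sketch: the same polling pattern using the wider
 * rcu_gp_oldstate cookie, which (as documented above) never misses a grace
 * period, at the cost of a larger cookie.  Hypothetical helper.
 */
static bool __maybe_unused example_try_reclaim_full(struct rcu_gp_oldstate *rgosp,
						    void *data)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		return false;		/* Not yet; re-poll later. */
	kfree(data);			/* A full grace period has elapsed. */
	return true;
}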
3810 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3811 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3813 * If a full RCU grace period has elapsed since the earlier call to
3814 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3815 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3817 * Yes, this function does not take counter wrap into account.
3818 * But counter wrap is harmless. If the counter wraps, we have waited for
3819 * more than 2 billion grace periods (and way more on a 64-bit system!),
3820 * so waiting for a couple of additional grace periods should be just fine.
3822 * This function provides the same memory-ordering guarantees that
3823 * would be provided by a synchronize_rcu() that was invoked at the call
3824 * to the function that provided @oldstate and that returned at the end of this function.
3827 void cond_synchronize_rcu(unsigned long oldstate)
3829 if (!poll_state_synchronize_rcu(oldstate))
3832 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
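/*
 * Illustrative sketch (hypothetical recycler): before reusing a buffer that
 * was retired earlier with a cookie from get_state_synchronize_rcu() or
 * start_poll_synchronize_rcu(), wait for that grace period only if it has
 * not already elapsed.
 */
static void __maybe_unused example_recycle_buffer(void *buf, size_t size,
						  unsigned long retire_cookie)
{
	cond_synchronize_rcu(retire_cookie);	/* No-op if the GP already ended. */
	memset(buf, 0, size);			/* Now safe to overwrite the buffer. */
}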
3835 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3836 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3838 * If a full RCU grace period has elapsed since the call to
3839 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3840 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3841 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3842 * for a full grace period.
3844 * Yes, this function does not take counter wrap into account.
3845 * But counter wrap is harmless. If the counter wraps, we have waited for
3846 * more than 2 billion grace periods (and way more on a 64-bit system!),
3847 * so waiting for a couple of additional grace periods should be just fine.
3849 * This function provides the same memory-ordering guarantees that
3850 * would be provided by a synchronize_rcu() that was invoked at the call
3851 * to the function that provided @rgosp and that returned at the end of this function.
3854 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3856 if (!poll_state_synchronize_rcu_full(rgosp))
3859 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3862 * Check to see if there is any immediate RCU-related work to be done by
3863 * the current CPU, returning 1 if so and zero otherwise. The checks are
3864 * in order of increasing expense: checks that can be carried out against
3865 * CPU-local state are performed first. However, we must check for CPU
3866 * stalls first, else we might not get a chance.
3868 static int rcu_pending(int user)
3870 bool gp_in_progress;
3871 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3872 struct rcu_node *rnp = rdp->mynode;
3874 lockdep_assert_irqs_disabled();
3876 /* Check for CPU stalls, if enabled. */
3877 check_cpu_stall(rdp);
3879 /* Does this CPU need a deferred NOCB wakeup? */
3880 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3883 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3884 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3887 /* Is the RCU core waiting for a quiescent state from this CPU? */
3888 gp_in_progress = rcu_gp_in_progress();
3889 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3892 /* Does this CPU have callbacks ready to invoke? */
3893 if (!rcu_rdp_is_offloaded(rdp) &&
3894 rcu_segcblist_ready_cbs(&rdp->cblist))
3897 /* Has RCU gone idle with this CPU needing another grace period? */
3898 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3899 !rcu_rdp_is_offloaded(rdp) &&
3900 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3903 /* Have RCU grace period completed or started? */
3904 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3905 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3913 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3914 * the compiler is expected to optimize this away.
3916 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3918 trace_rcu_barrier(rcu_state.name, s, cpu,
3919 atomic_read(&rcu_state.barrier_cpu_count), done);
3923 * RCU callback function for rcu_barrier(). If we are last, wake
3924 * up the task executing rcu_barrier().
3926 * Note that the value of rcu_state.barrier_sequence must be captured
3927 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3928 * other CPUs might count the value down to zero before this CPU gets
3929 * around to invoking rcu_barrier_trace(), which might result in bogus
3930 * data from the next instance of rcu_barrier().
3932 static void rcu_barrier_callback(struct rcu_head *rhp)
3934 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3936 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3937 rcu_barrier_trace(TPS("LastCB"), -1, s);
3938 complete(&rcu_state.barrier_completion);
3940 rcu_barrier_trace(TPS("CB"), -1, s);
3945 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3947 static void rcu_barrier_entrain(struct rcu_data *rdp)
3949 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3950 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3951 bool wake_nocb = false;
3952 bool was_alldone = false;
3954 lockdep_assert_held(&rcu_state.barrier_lock);
3955 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3957 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3958 rdp->barrier_head.func = rcu_barrier_callback;
3959 debug_rcu_head_queue(&rdp->barrier_head);
3962 * Flush the bypass list and wake up the rcuog kthread if we are adding
3963 * callbacks to an otherwise-empty regular queue. This way we don't wait on
3964 * the bypass timer, which can be seconds away when the bypass list is fully lazy.
3966 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3967 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3968 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3969 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3970 atomic_inc(&rcu_state.barrier_cpu_count);
3972 debug_rcu_head_unqueue(&rdp->barrier_head);
3973 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3975 rcu_nocb_unlock(rdp);
3977 wake_nocb_gp(rdp, false);
3978 smp_store_release(&rdp->barrier_seq_snap, gseq);
3982 * Called with preemption disabled, and from cross-cpu IRQ context.
3984 static void rcu_barrier_handler(void *cpu_in)
3986 uintptr_t cpu = (uintptr_t)cpu_in;
3987 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3989 lockdep_assert_irqs_disabled();
3990 WARN_ON_ONCE(cpu != rdp->cpu);
3991 WARN_ON_ONCE(cpu != smp_processor_id());
3992 raw_spin_lock(&rcu_state.barrier_lock);
3993 rcu_barrier_entrain(rdp);
3994 raw_spin_unlock(&rcu_state.barrier_lock);
3998 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4000 * Note that this primitive does not necessarily wait for an RCU grace period
4001 * to complete. For example, if there are no RCU callbacks queued anywhere
4002 * in the system, then rcu_barrier() is within its rights to return
4003 * immediately, without waiting for anything, much less an RCU grace period.
4005 void rcu_barrier(void)
4008 unsigned long flags;
4010 struct rcu_data *rdp;
4011 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4013 rcu_barrier_trace(TPS("Begin"), -1, s);
4015 /* Take mutex to serialize concurrent rcu_barrier() requests. */
4016 mutex_lock(&rcu_state.barrier_mutex);
4018 /* Did someone else do our work for us? */
4019 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4020 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4021 smp_mb(); /* caller's subsequent code after above check. */
4022 mutex_unlock(&rcu_state.barrier_mutex);
4026 /* Mark the start of the barrier operation. */
4027 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4028 rcu_seq_start(&rcu_state.barrier_sequence);
4029 gseq = rcu_state.barrier_sequence;
4030 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4033 * Initialize the count to two rather than to zero in order
4034 * to avoid a too-soon return to zero in case of an immediate
4035 * invocation of the just-enqueued callback (or preemption of
4036 * this task). Exclude CPU-hotplug operations to ensure that no
4037 * offline non-offloaded CPU has callbacks queued.
4039 init_completion(&rcu_state.barrier_completion);
4040 atomic_set(&rcu_state.barrier_cpu_count, 2);
4041 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4044 * Force each CPU with callbacks to register a new callback.
4045 * When that callback is invoked, we will know that all of the
4046 * corresponding CPU's preceding callbacks have been invoked.
4048 for_each_possible_cpu(cpu) {
4049 rdp = per_cpu_ptr(&rcu_data, cpu);
4051 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4053 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4054 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4055 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4056 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4057 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4060 if (!rcu_rdp_cpu_online(rdp)) {
4061 rcu_barrier_entrain(rdp);
4062 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4063 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4064 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4067 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4068 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4069 schedule_timeout_uninterruptible(1);
4072 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4073 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4077 * Now that we have an rcu_barrier_callback() callback on each
4078 * CPU, and thus each counted, remove the initial count.
4080 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4081 complete(&rcu_state.barrier_completion);
4083 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4084 wait_for_completion(&rcu_state.barrier_completion);
4086 /* Mark the end of the barrier operation. */
4087 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4088 rcu_seq_end(&rcu_state.barrier_sequence);
4089 gseq = rcu_state.barrier_sequence;
4090 for_each_possible_cpu(cpu) {
4091 rdp = per_cpu_ptr(&rcu_data, cpu);
4093 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4096 /* Other rcu_barrier() invocations can now safely proceed. */
4097 mutex_unlock(&rcu_state.barrier_mutex);
4099 EXPORT_SYMBOL_GPL(rcu_barrier);
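/*
 * Illustrative sketch of the canonical rcu_barrier() use case, typically in a
 * module-unload path.  The steps are placeholders; the point is only their
 * ordering relative to rcu_barrier().
 */
static void __maybe_unused example_module_teardown(void)
{
	/* Step 1 (hypothetical): ensure no further call_rcu() invocations occur. */

	/* Step 2: wait until every already-queued callback has been invoked. */
	rcu_barrier();

	/* Step 3: now it is safe to free the data those callbacks were using. */
}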
4101 static unsigned long rcu_barrier_last_throttle;
4104 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
4106 * This can be thought of as guard rails around rcu_barrier() that
4107 * permits unrestricted userspace use, at least assuming the hardware's
4108 * try_cmpxchg() is robust. There will be at most one call per second to
4109 * rcu_barrier() system-wide from use of this function, which means that
4110 * callers might needlessly wait a second or three.
4112 * This is intended for use by test suites to avoid OOM by flushing RCU
4113 * callbacks from the previous test before starting the next. See the
4114 * rcutree.do_rcu_barrier module parameter for more information.
4116 * Why not simply make rcu_barrier() more scalable? That might be
4117 * the eventual endpoint, but let's keep it simple for the time being.
4118 * Note that the module parameter infrastructure serializes calls to a
4119 * given .set() function, but should concurrent .set() invocation ever be
4120 * possible, we are ready!
4122 static void rcu_barrier_throttled(void)
4124 unsigned long j = jiffies;
4125 unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
4126 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4128 while (time_in_range(j, old, old + HZ / 16) ||
4129 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
4130 schedule_timeout_idle(HZ / 16);
4131 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4132 smp_mb(); /* caller's subsequent code after above check. */
4136 old = READ_ONCE(rcu_barrier_last_throttle);
4142 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
4143 * request arrives. We insist on a true value to allow for possible future expansion.
4146 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
4151 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
4153 ret = kstrtobool(val, &b);
4155 atomic_inc((atomic_t *)kp->arg);
4156 rcu_barrier_throttled();
4157 atomic_dec((atomic_t *)kp->arg);
4163 * Output the number of outstanding rcutree.do_rcu_barrier requests.
4165 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
4167 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
4170 static const struct kernel_param_ops do_rcu_barrier_ops = {
4171 .set = param_set_do_rcu_barrier,
4172 .get = param_get_do_rcu_barrier,
4174 static atomic_t do_rcu_barrier;
4175 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
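/*
 * For example (illustrative, assuming the usual sysfs layout for built-in
 * module parameters), a test harness can trigger a throttled rcu_barrier()
 * from userspace with:
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 */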
4178 * Compute the mask of online CPUs for the specified rcu_node structure.
4179 * This will not be stable unless the rcu_node structure's ->lock is
4180 * held, but the bit corresponding to the current CPU will be stable in most contexts.
4183 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4185 return READ_ONCE(rnp->qsmaskinitnext);
4189 * Is the CPU corresponding to the specified rcu_data structure online
4190 * from RCU's perspective? This perspective is given by that structure's
4191 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
4193 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4195 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4198 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4201 * Is the current CPU online as far as RCU is concerned?
4203 * Disable preemption to avoid false positives that could otherwise
4204 * happen due to the current CPU number being sampled, this task being
4205 * preempted, its old CPU being taken offline, resuming on some other CPU,
4206 * then determining that its old CPU is now offline.
4208 * Disable checking if in an NMI handler because we cannot safely
4209 * report errors from NMI handlers anyway. In addition, it is OK to use
4210 * RCU on an offline processor during initial boot, hence the check for
4211 * rcu_scheduler_fully_active.
4213 bool rcu_lockdep_current_cpu_online(void)
4215 struct rcu_data *rdp;
4218 if (in_nmi() || !rcu_scheduler_fully_active)
4220 preempt_disable_notrace();
4221 rdp = this_cpu_ptr(&rcu_data);
4223 * Strictly, we care here about the case where the current CPU is
4224 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4225 * not being up to date. So arch_spin_is_locked() might have a
4226 * false positive if it's held by some *other* CPU, but that's
4227 * OK because that just means a false *negative* on the warning.
4229 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4231 preempt_enable_notrace();
4234 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4236 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4238 // Has rcu_init() been invoked? This is used (for example) to determine
4239 // whether spinlocks may be acquired safely.
4240 static bool rcu_init_invoked(void)
4242 return !!rcu_state.n_online_cpus;
4246 * All CPUs for the specified rcu_node structure have gone offline,
4247 * and all tasks that were preempted within an RCU read-side critical
4248 * section while running on one of those CPUs have since exited their RCU
4249 * read-side critical section. Some other CPU is reporting this fact with
4250 * the specified rcu_node structure's ->lock held and interrupts disabled.
4251 * This function therefore goes up the tree of rcu_node structures,
4252 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4253 * the leaf rcu_node structure's ->qsmaskinit field has already been updated.
4256 * This function does check that the specified rcu_node structure has
4257 * all CPUs offline and no blocked tasks, so it is OK to invoke it
4258 * prematurely. That said, invoking it after the fact will cost you
4259 * a needless lock acquisition. So once it has done its work, don't invoke it again.
4262 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4265 struct rcu_node *rnp = rnp_leaf;
4267 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4268 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4269 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4270 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4273 mask = rnp->grpmask;
4277 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4278 rnp->qsmaskinit &= ~mask;
4279 /* Between grace periods, so better already be zero! */
4280 WARN_ON_ONCE(rnp->qsmask);
4281 if (rnp->qsmaskinit) {
4282 raw_spin_unlock_rcu_node(rnp);
4283 /* irqs remain disabled. */
4286 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4291 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4292 * first CPU in a given leaf rcu_node structure coming online. The caller
4293 * must hold the corresponding leaf rcu_node ->lock with interrupts disabled.
4296 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4300 struct rcu_node *rnp = rnp_leaf;
4302 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4303 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4305 mask = rnp->grpmask;
4309 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4310 oldmask = rnp->qsmaskinit;
4311 rnp->qsmaskinit |= mask;
4312 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4319 * Do boot-time initialization of a CPU's per-CPU RCU data.
4322 rcu_boot_init_percpu_data(int cpu)
4324 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4325 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4327 /* Set up local state, ensuring consistent view of global state. */
4328 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4329 INIT_WORK(&rdp->strict_work, strict_work_handler);
4330 WARN_ON_ONCE(ct->dynticks_nesting != 1);
4331 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4332 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4333 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4334 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4335 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4336 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4337 rdp->last_sched_clock = jiffies;
4339 rcu_boot_init_nocb_percpu_data(rdp);
4343 * Invoked early in the CPU-online process, when pretty much all services
4344 * are available. The incoming CPU is not present.
4346 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4347 * offline event can be happening at a given time. Note also that we can
4348 * accept some slop in the rcu_state.gp_seq access due to the fact that this
4349 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4350 * And any offloaded callbacks are being numbered elsewhere.
4352 int rcutree_prepare_cpu(unsigned int cpu)
4354 unsigned long flags;
4355 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4356 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4357 struct rcu_node *rnp = rcu_get_root();
4359 /* Set up local state, ensuring consistent view of global state. */
4360 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4361 rdp->qlen_last_fqs_check = 0;
4362 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4363 rdp->blimit = blimit;
4364 ct->dynticks_nesting = 1; /* CPU not up, no tearing. */
4365 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4368 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be (re-)initialized here.
4371 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4372 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4375 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4376 * propagation up the rcu_node tree will happen at the beginning
4377 * of the next grace period.
4380 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4381 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4382 rdp->gp_seq_needed = rdp->gp_seq;
4383 rdp->cpu_no_qs.b.norm = true;
4384 rdp->core_needs_qs = false;
4385 rdp->rcu_iw_pending = false;
4386 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4387 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4388 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4389 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4390 rcu_spawn_one_boost_kthread(rnp);
4391 rcu_spawn_cpu_nocb_kthread(cpu);
4392 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4398 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
4400 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4402 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4404 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4408 * Has the specified (known valid) CPU ever been fully online?
4410 bool rcu_cpu_beenfullyonline(int cpu)
4412 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4414 return smp_load_acquire(&rdp->beenonline);
4418 * Near the end of the CPU-online process. Pretty much all services
4419 * enabled, and the CPU is now very much alive.
4421 int rcutree_online_cpu(unsigned int cpu)
4423 unsigned long flags;
4424 struct rcu_data *rdp;
4425 struct rcu_node *rnp;
4427 rdp = per_cpu_ptr(&rcu_data, cpu);
4429 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4430 rnp->ffmask |= rdp->grpmask;
4431 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4432 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4433 return 0; /* Too early in boot for scheduler work. */
4434 sync_sched_exp_online_cleanup(cpu);
4435 rcutree_affinity_setting(cpu, -1);
4437 // Stop-machine done, so allow nohz_full to disable tick.
4438 tick_dep_clear(TICK_DEP_BIT_RCU);
4443 * Mark the specified CPU as being online so that subsequent grace periods
4444 * (both expedited and normal) will wait on it. Note that this means that
4445 * incoming CPUs are not allowed to use RCU read-side critical sections
4446 * until this function is called. Failing to observe this restriction
4447 * will result in lockdep splats.
4449 * Note that this function is special in that it is invoked directly
4450 * from the incoming CPU rather than from the cpuhp_step mechanism.
4451 * This is because this function must be invoked at a precise location.
4452 * This incoming CPU must not have enabled interrupts yet.
4454 * This mirrors the effects of rcutree_report_cpu_dead().
4456 void rcutree_report_cpu_starting(unsigned int cpu)
4459 struct rcu_data *rdp;
4460 struct rcu_node *rnp;
4463 lockdep_assert_irqs_disabled();
4464 rdp = per_cpu_ptr(&rcu_data, cpu);
4465 if (rdp->cpu_started)
4467 rdp->cpu_started = true;
4470 mask = rdp->grpmask;
4471 arch_spin_lock(&rcu_state.ofl_lock);
4472 rcu_dynticks_eqs_online();
4473 raw_spin_lock(&rcu_state.barrier_lock);
4474 raw_spin_lock_rcu_node(rnp);
4475 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4476 raw_spin_unlock(&rcu_state.barrier_lock);
4477 newcpu = !(rnp->expmaskinitnext & mask);
4478 rnp->expmaskinitnext |= mask;
4479 /* Allow lockless access for expedited grace periods. */
4480 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4481 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4482 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4483 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4484 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4486 /* An incoming CPU should never be blocking a grace period. */
4487 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4488 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4489 unsigned long flags;
4491 local_irq_save(flags);
4492 rcu_disable_urgency_upon_qs(rdp);
4493 /* Report QS -after- changing ->qsmaskinitnext! */
4494 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4496 raw_spin_unlock_rcu_node(rnp);
4498 arch_spin_unlock(&rcu_state.ofl_lock);
4499 smp_store_release(&rdp->beenonline, true);
4500 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4504 * The outgoing CPU has no further need of RCU, so remove it from
4505 * the rcu_node tree's ->qsmaskinitnext bit masks.
4507 * Note that this function is special in that it is invoked directly
4508 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4509 * This is because this function must be invoked at a precise location.
4511 * This mirrors the effect of rcutree_report_cpu_starting().
4513 void rcutree_report_cpu_dead(void)
4515 unsigned long flags;
4517 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4518 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4521 * IRQs must be disabled from now on and until the CPU dies, or an interrupt
4522 * may introduce a new read-side critical section while this CPU is already off the QS masks.
4524 lockdep_assert_irqs_disabled();
4525 // Do any dangling deferred wakeups.
4526 do_nocb_deferred_wakeup(rdp);
4528 rcu_preempt_deferred_qs(current);
4530 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4531 mask = rdp->grpmask;
4532 arch_spin_lock(&rcu_state.ofl_lock);
4533 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4534 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4535 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4536 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4537 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4538 rcu_disable_urgency_upon_qs(rdp);
4539 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4540 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4542 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4543 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4544 arch_spin_unlock(&rcu_state.ofl_lock);
4545 rdp->cpu_started = false;
4548 #ifdef CONFIG_HOTPLUG_CPU
4550 * The outgoing CPU has just passed through the dying-idle state, and we
4551 * are being invoked from the CPU that was IPIed to continue the offline
4552 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4554 void rcutree_migrate_callbacks(int cpu)
4556 unsigned long flags;
4557 struct rcu_data *my_rdp;
4558 struct rcu_node *my_rnp;
4559 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4562 if (rcu_rdp_is_offloaded(rdp) ||
4563 rcu_segcblist_empty(&rdp->cblist))
4564 return; /* No callbacks to migrate. */
4566 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4567 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4568 rcu_barrier_entrain(rdp);
4569 my_rdp = this_cpu_ptr(&rcu_data);
4570 my_rnp = my_rdp->mynode;
4571 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4572 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4573 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4574 /* Leverage recent GPs and set GP for new callbacks. */
4575 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4576 rcu_advance_cbs(my_rnp, my_rdp);
4577 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4578 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4579 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4580 rcu_segcblist_disable(&rdp->cblist);
4581 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4582 check_cb_ovld_locked(my_rdp, my_rnp);
4583 if (rcu_rdp_is_offloaded(my_rdp)) {
4584 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4585 __call_rcu_nocb_wake(my_rdp, true, flags);
4587 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4588 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4591 rcu_gp_kthread_wake();
4592 lockdep_assert_irqs_enabled();
4593 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4594 !rcu_segcblist_empty(&rdp->cblist),
4595 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4596 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4597 rcu_segcblist_first_cb(&rdp->cblist));
4601 * The CPU has been completely removed, and some other CPU is reporting
4602 * this fact from process context. Do the remainder of the cleanup.
4603 * There can only be one CPU hotplug operation at a time, so no need for explicit locking.
4606 int rcutree_dead_cpu(unsigned int cpu)
4608 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4609 // Stop-machine done, so allow nohz_full to disable tick.
4610 tick_dep_clear(TICK_DEP_BIT_RCU);
4615 * Near the end of the offline process. Trace the fact that this CPU is going offline.
4618 int rcutree_dying_cpu(unsigned int cpu)
4621 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4622 struct rcu_node *rnp = rdp->mynode;
4624 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4625 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4626 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4631 * Near the beginning of the process. The CPU is still very much alive
4632 * with pretty much all services enabled.
4634 int rcutree_offline_cpu(unsigned int cpu)
4636 unsigned long flags;
4637 struct rcu_data *rdp;
4638 struct rcu_node *rnp;
4640 rdp = per_cpu_ptr(&rcu_data, cpu);
4642 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4643 rnp->ffmask &= ~rdp->grpmask;
4644 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4646 rcutree_affinity_setting(cpu, cpu);
4648 // nohz_full CPUs need the tick for stop-machine to work quickly
4649 tick_dep_set(TICK_DEP_BIT_RCU);
4652 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
4655 * On non-huge systems, use expedited RCU grace periods to make suspend
4656 * and hibernation run faster.
4658 static int rcu_pm_notify(struct notifier_block *self,
4659 unsigned long action, void *hcpu)
4662 case PM_HIBERNATION_PREPARE:
4663 case PM_SUSPEND_PREPARE:
4667 case PM_POST_HIBERNATION:
4668 case PM_POST_SUSPEND:
4669 rcu_unexpedite_gp();
4678 #ifdef CONFIG_RCU_EXP_KTHREAD
4679 struct kthread_worker *rcu_exp_gp_kworker;
4680 struct kthread_worker *rcu_exp_par_gp_kworker;
4682 static void __init rcu_start_exp_gp_kworkers(void)
4684 const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4685 const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4686 struct sched_param param = { .sched_priority = kthread_prio };
4688 rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4689 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4690 pr_err("Failed to create %s!\n", gp_kworker_name);
4694 rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4695 if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4696 pr_err("Failed to create %s!\n", par_gp_kworker_name);
4697 kthread_destroy_worker(rcu_exp_gp_kworker);
4701 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, ¶m);
4702 sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4706 static inline void rcu_alloc_par_gp_wq(void)
4709 #else /* !CONFIG_RCU_EXP_KTHREAD */
4710 struct workqueue_struct *rcu_par_gp_wq;
4712 static void __init rcu_start_exp_gp_kworkers(void)
4716 static inline void rcu_alloc_par_gp_wq(void)
4718 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4719 WARN_ON(!rcu_par_gp_wq);
4721 #endif /* CONFIG_RCU_EXP_KTHREAD */
4724 * Spawn the kthreads that handle RCU's grace periods.
4726 static int __init rcu_spawn_gp_kthread(void)
4728 unsigned long flags;
4729 struct rcu_node *rnp;
4730 struct sched_param sp;
4731 struct task_struct *t;
4732 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4734 rcu_scheduler_fully_active = 1;
4735 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4736 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4739 sp.sched_priority = kthread_prio;
4740 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4742 rnp = rcu_get_root();
4743 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4744 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4745 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4746 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4747 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4748 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4750 /* This is a pre-SMP initcall, we expect a single CPU */
4751 WARN_ON(num_online_cpus() > 1);
4753 * These kthreads could not be created earlier, in rcu_init() -> rcutree_prepare_cpu(),
4754 * because rcu_scheduler_fully_active was not yet set.
4756 rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4757 rcu_spawn_one_boost_kthread(rdp->mynode);
4758 rcu_spawn_core_kthreads();
4759 /* Create kthread worker for expedited GPs */
4760 rcu_start_exp_gp_kworkers();
4763 early_initcall(rcu_spawn_gp_kthread);
4766 * This function is invoked towards the end of the scheduler's
4767 * initialization process. Before this is called, the idle task might
4768 * contain synchronous grace-period primitives (during which time, this idle
4769 * task is booting the system, and such primitives are no-ops). After this
4770 * function is called, any synchronous grace-period primitives are run as
4771 * expedited, with the requesting task driving the grace period forward.
4772 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4773 * runtime RCU functionality.
4775 void rcu_scheduler_starting(void)
4777 unsigned long flags;
4778 struct rcu_node *rnp;
4780 WARN_ON(num_online_cpus() != 1);
4781 WARN_ON(nr_context_switches() > 0);
4782 rcu_test_sync_prims();
4784 // Fix up the ->gp_seq counters.
4785 local_irq_save(flags);
4786 rcu_for_each_node_breadth_first(rnp)
4787 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4788 local_irq_restore(flags);
4790 // Switch out of early boot mode.
4791 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4792 rcu_test_sync_prims();
4796 * Helper function for rcu_init() that initializes the rcu_state structure.
4798 static void __init rcu_init_one(void)
4800 static const char * const buf[] = RCU_NODE_NAME_INIT;
4801 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4802 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4803 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4805 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4809 struct rcu_node *rnp;
4811 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4813 /* Silence gcc 4.8 false positive about array index out of range. */
4814 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4815 panic("rcu_init_one: rcu_num_lvls out of range");
4817 /* Initialize the level-tracking arrays. */
4819 for (i = 1; i < rcu_num_lvls; i++)
4820 rcu_state.level[i] =
4821 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4822 rcu_init_levelspread(levelspread, num_rcu_lvl);
4824 /* Initialize the elements themselves, starting from the leaves. */
4826 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4827 cpustride *= levelspread[i];
4828 rnp = rcu_state.level[i];
4829 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4830 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4831 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4832 &rcu_node_class[i], buf[i]);
4833 raw_spin_lock_init(&rnp->fqslock);
4834 lockdep_set_class_and_name(&rnp->fqslock,
4835 &rcu_fqs_class[i], fqs[i]);
4836 rnp->gp_seq = rcu_state.gp_seq;
4837 rnp->gp_seq_needed = rcu_state.gp_seq;
4838 rnp->completedqs = rcu_state.gp_seq;
4840 rnp->qsmaskinit = 0;
4841 rnp->grplo = j * cpustride;
4842 rnp->grphi = (j + 1) * cpustride - 1;
4843 if (rnp->grphi >= nr_cpu_ids)
4844 rnp->grphi = nr_cpu_ids - 1;
4850 rnp->grpnum = j % levelspread[i - 1];
4851 rnp->grpmask = BIT(rnp->grpnum);
4852 rnp->parent = rcu_state.level[i - 1] +
4853 j / levelspread[i - 1];
4856 INIT_LIST_HEAD(&rnp->blkd_tasks);
4857 rcu_init_one_nocb(rnp);
4858 init_waitqueue_head(&rnp->exp_wq[0]);
4859 init_waitqueue_head(&rnp->exp_wq[1]);
4860 init_waitqueue_head(&rnp->exp_wq[2]);
4861 init_waitqueue_head(&rnp->exp_wq[3]);
4862 spin_lock_init(&rnp->exp_lock);
4863 mutex_init(&rnp->boost_kthread_mutex);
4864 raw_spin_lock_init(&rnp->exp_poll_lock);
4865 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4866 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4870 init_swait_queue_head(&rcu_state.gp_wq);
4871 init_swait_queue_head(&rcu_state.expedited_wq);
4872 rnp = rcu_first_leaf_node();
4873 for_each_possible_cpu(i) {
4874 while (i > rnp->grphi)
4876 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4877 rcu_boot_init_percpu_data(i);
4882 * Force priority from the kernel command-line into range.
4884 static void __init sanitize_kthread_prio(void)
4886 int kthread_prio_in = kthread_prio;
4888 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4889 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4891 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4893 else if (kthread_prio < 0)
4895 else if (kthread_prio > 99)
4898 if (kthread_prio != kthread_prio_in)
4899 pr_alert("%s: Limited prio to %d from %d\n",
4900 __func__, kthread_prio, kthread_prio_in);
4904 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4905 * replace the definitions in tree.h because those are needed to size
4906 * the ->node array in the rcu_state structure.
4908 void rcu_init_geometry(void)
4912 static unsigned long old_nr_cpu_ids;
4913 int rcu_capacity[RCU_NUM_LVLS];
4914 static bool initialized;
4918 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4919 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4921 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4925 old_nr_cpu_ids = nr_cpu_ids;
4929 * Initialize any unspecified boot parameters.
4930 * The default values of jiffies_till_first_fqs and
4931 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4932 * value, which is a function of HZ, then adding one for each
4933 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4935 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4936 if (jiffies_till_first_fqs == ULONG_MAX)
4937 jiffies_till_first_fqs = d;
4938 if (jiffies_till_next_fqs == ULONG_MAX)
4939 jiffies_till_next_fqs = d;
4940 adjust_jiffies_till_sched_qs();
4942 /* If the compile-time values are accurate, just leave. */
4943 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4944 nr_cpu_ids == NR_CPUS)
4946 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4947 rcu_fanout_leaf, nr_cpu_ids);
4950 * The boot-time rcu_fanout_leaf parameter must be at least two
4951 * and cannot exceed the number of bits in the rcu_node masks.
4952 * Complain and fall back to the compile-time values if this
4953 * limit is exceeded.
4955 if (rcu_fanout_leaf < 2 ||
4956 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4957 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4963 * Compute number of nodes that can be handled by an rcu_node tree
4964 * with the given number of levels.
4966 rcu_capacity[0] = rcu_fanout_leaf;
4967 for (i = 1; i < RCU_NUM_LVLS; i++)
4968 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
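	/*
	 * For example (illustrative numbers only), with rcu_fanout_leaf = 16
	 * and RCU_FANOUT = 64 this yields rcu_capacity[] = { 16, 1024, 65536, ... },
	 * so a two-level tree covers up to 1024 CPUs and a three-level tree
	 * covers up to 65536 CPUs.
	 */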
4971 * The tree must be able to accommodate the configured number of CPUs.
4972 * If this limit is exceeded, fall back to the compile-time values.
4974 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4975 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4980 /* Calculate the number of levels in the tree. */
4981 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4983 rcu_num_lvls = i + 1;
4985 /* Calculate the number of rcu_nodes at each level of the tree. */
4986 for (i = 0; i < rcu_num_lvls; i++) {
4987 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4988 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4991 /* Calculate the total number of rcu_node structures. */
4993 for (i = 0; i < rcu_num_lvls; i++)
4994 rcu_num_nodes += num_rcu_lvl[i];
4998 * Dump out the structure of the rcu_node combining tree associated
4999 * with the rcu_state structure.
5001 static void __init rcu_dump_rcu_node_tree(void)
5004 struct rcu_node *rnp;
5006 pr_info("rcu_node tree layout dump\n");
5008 rcu_for_each_node_breadth_first(rnp) {
5009 if (rnp->level != level) {
5014 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
5019 struct workqueue_struct *rcu_gp_wq;
5021 static void __init kfree_rcu_batch_init(void)
5025 struct shrinker *kfree_rcu_shrinker;
5027 /* Clamp it to the [0:100] seconds interval. */
5028 if (rcu_delay_page_cache_fill_msec < 0 ||
5029 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
5031 rcu_delay_page_cache_fill_msec =
5032 clamp(rcu_delay_page_cache_fill_msec, 0,
5033 (int) (100 * MSEC_PER_SEC));
5035 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
5036 rcu_delay_page_cache_fill_msec);
5039 for_each_possible_cpu(cpu) {
5040 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
5042 for (i = 0; i < KFREE_N_BATCHES; i++) {
5043 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
5044 krcp->krw_arr[i].krcp = krcp;
5046 for (j = 0; j < FREE_N_CHANNELS; j++)
5047 INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
5050 for (i = 0; i < FREE_N_CHANNELS; i++)
5051 INIT_LIST_HEAD(&krcp->bulk_head[i]);
5053 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
5054 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
5055 krcp->initialized = true;
5058 kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
5059 if (!kfree_rcu_shrinker) {
5060 pr_err("Failed to allocate kfree_rcu() shrinker!\n");
5064 kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
5065 kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
5067 shrinker_register(kfree_rcu_shrinker);
5070 void __init rcu_init(void)
5072 int cpu = smp_processor_id();
5074 rcu_early_boot_tests();
5076 kfree_rcu_batch_init();
5077 rcu_bootup_announce();
5078 sanitize_kthread_prio();
5079 rcu_init_geometry();
5082 rcu_dump_rcu_node_tree();
5084 open_softirq(RCU_SOFTIRQ, rcu_core_si);
5087 * We don't need protection against CPU-hotplug here because
5088 * this is called early in boot, before either interrupts
5089 * or the scheduler are operational.
5091 pm_notifier(rcu_pm_notify, 0);
5092 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
5093 rcutree_prepare_cpu(cpu);
5094 rcutree_report_cpu_starting(cpu);
5095 rcutree_online_cpu(cpu);
5097 /* Create workqueue for Tree SRCU and for expedited GPs. */
5098 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
5099 WARN_ON(!rcu_gp_wq);
5100 rcu_alloc_par_gp_wq();
5102 /* Fill in default value for rcutree.qovld boot parameter. */
5103 /* -After- the rcu_node ->lock fields are initialized! */
5105 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
5109 // Kick-start in case any polled grace periods started early.
5110 (void)start_poll_synchronize_rcu_expedited();
5112 rcu_test_sync_prims();
5115 #include "tree_stall.h"
5116 #include "tree_exp.h"
5117 #include "tree_nocb.h"
5118 #include "tree_plugin.h"