// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */
16 #define pr_fmt(fmt) "rcu: " fmt
18 #include <linux/export.h>
19 #include <linux/mutex.h>
20 #include <linux/percpu.h>
21 #include <linux/preempt.h>
22 #include <linux/rcupdate_wait.h>
23 #include <linux/sched.h>
24 #include <linux/smp.h>
25 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/srcu.h>
#include "rcu.h"
#include "rcu_segcblist.h"
33 /* Holdoff in nanoseconds for auto-expediting. */
34 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
35 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
36 module_param(exp_holdoff, ulong, 0444);
38 /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
39 static ulong counter_wrap_check = (ULONG_MAX >> 2);
40 module_param(counter_wrap_check, ulong, 0444);
/*
 * Control conversion to SRCU_SIZE_BIG:
 * 0: Don't convert at all.
 * 1: Convert at init_srcu_struct() time.
 * 2: Convert when rcutorture invokes srcu_torture_stats_print().
 * 3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
50 #define SRCU_SIZING_NONE 0
51 #define SRCU_SIZING_INIT 1
52 #define SRCU_SIZING_TORTURE 2
53 #define SRCU_SIZING_AUTO 3
54 #define SRCU_SIZING_CONTEND 0x10
55 #define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
56 #define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
57 #define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
58 #define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
59 #define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
60 static int convert_to_big = SRCU_SIZING_AUTO;
61 module_param(convert_to_big, int, 0444);
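/*
 * Worked example of the encoding above (illustrative only, per the table):
 * booting with srcutree.convert_to_big=0x11 requests contention-driven
 * conversion (the 0x10 bit) on top of conversion at init_srcu_struct()
 * time (low-order value 1):
 *
 *	convert_to_big = 0x11;
 *	SRCU_SIZING_IS_CONTEND();		// (0x11 & 0x10) != 0, so true
 *	SRCU_SIZING_IS(SRCU_SIZING_INIT);	// (0x11 & ~0x10) == 1, so true
 */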
63 /* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
64 static int big_cpu_lim __read_mostly = 128;
65 module_param(big_cpu_lim, int, 0444);
67 /* Contention events per jiffy to initiate transition to big. */
68 static int small_contention_lim __read_mostly = 100;
69 module_param(small_contention_lim, int, 0444);
71 /* Early-boot callback-management, so early that no lock is required! */
72 static LIST_HEAD(srcu_boot_list);
73 static bool __read_mostly srcu_init_done;
75 static void srcu_invoke_callbacks(struct work_struct *work);
76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
77 static void process_srcu(struct work_struct *work);
78 static void srcu_delay_timer(struct timer_list *t);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)				\
({									\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
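/*
 * Illustrative sketch of how these wrappers are used elsewhere in this
 * file (the names below are placeholders, not new API):
 *
 *	unsigned long flags;
 *	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	// ... update sdp fields under the per-CPU lock ...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 *
 * The smp_mb__after_unlock_lock() in the acquisition wrappers makes the
 * lock acquisition act as a full memory barrier on architectures that
 * need it, which RCU's grace-period machinery relies on.
 */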
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}
/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether sequence number corresponding to snp node,
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return s == SRCU_SNP_INIT_SEQ;
}
/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;
174 /* Initialize geometry if it has not already been initialized. */
176 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
	if (!ssp->srcu_sup->node)
		return false;
180 /* Work out the overall tree geometry. */
181 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
182 for (i = 1; i < rcu_num_lvls; i++)
183 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
184 rcu_init_levelspread(levelspread, num_rcu_lvl);
186 /* Each pass through this loop initializes one srcu_node structure. */
187 srcu_for_each_node_breadth_first(ssp, snp) {
188 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
189 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
190 ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->srcu_sup->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->srcu_sup->level[level + 1])
			level++;
		snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
				   (snp - ssp->srcu_sup->level[level]) /
				   levelspread[level - 1];
	}
	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->srcu_sup->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	if (!is_static)
		ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
	if (!ssp->srcu_sup)
		return -ENOMEM;
	if (!is_static)
		spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
	ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->srcu_sup->node = NULL;
	mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
	mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_sup->srcu_gp_seq = 0;
	ssp->srcu_sup->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
	ssp->srcu_sup->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		goto err_free_sup;
	init_srcu_struct_data(ssp);
	ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
			goto err_free_sda;
		WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
	}
	ssp->srcu_sup->srcu_ssp = ssp;
	smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;

err_free_sda:
	if (!is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
err_free_sup:
	if (!is_static) {
		kfree(ssp->srcu_sup);
		ssp->srcu_sup = NULL;
	}
	return -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
297 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
313 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
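/*
 * Illustrative sketch of a dynamically initialized SRCU domain (the
 * caller-side names below are assumptions, not part of this file):
 *
 *	struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);	// before any other use
 *	...
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);	// read-side access
 *	srcu_read_unlock(&my_srcu, idx);
 *	...
 *	cleanup_srcu_struct(&my_srcu);		// after readers and callbacks are done
 *
 * Statically allocated domains can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), relying on the first-use initialization performed
 * by check_init_srcu_struct() below.
 */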
316 * Initiate a transition to SRCU_SIZE_BIG with lock held.
318 static void __srcu_transition_to_big(struct srcu_struct *ssp)
320 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
321 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
325 * Initiate an idempotent transition to SRCU_SIZE_BIG.
327 static void srcu_transition_to_big(struct srcu_struct *ssp)
	/* Double-checked locking on ->srcu_size_state. */
332 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
334 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
335 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
336 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
339 __srcu_transition_to_big(ssp);
340 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
344 * Check to see if the just-encountered contention event justifies
345 * a transition to SRCU_SIZE_BIG.
347 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
351 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
354 if (ssp->srcu_sup->srcu_size_jiffies != j) {
355 ssp->srcu_sup->srcu_size_jiffies = j;
356 ssp->srcu_sup->srcu_n_lock_retries = 0;
358 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
360 __srcu_transition_to_big(ssp);
364 * Acquire the specified srcu_data structure's ->lock, but check for
365 * excessive contention, which results in initiation of a transition
366 * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
367 * parameter permits this.
369 static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
371 struct srcu_struct *ssp = sdp->ssp;
373 if (spin_trylock_irqsave_rcu_node(sdp, *flags))
375 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
376 spin_lock_irqsave_check_contention(ssp);
377 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
378 spin_lock_irqsave_rcu_node(sdp, *flags);
382 * Acquire the specified srcu_struct structure's ->lock, but check for
383 * excessive contention, which results in initiation of a transition
384 * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
385 * parameter permits this.
387 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
389 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
391 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
392 spin_lock_irqsave_check_contention(ssp);
396 * First-use initialization of statically allocated srcu_struct
397 * structure. Wiring up the combining tree is more than can be
398 * done with compile-time initialization, so this check is added
399 * to each update-side SRCU primitive. Use ssp->lock, which -is-
400 * compile-time initialized, to resolve races involving multiple
401 * CPUs trying to garner first-use privileges.
403 static void check_init_srcu_struct(struct srcu_struct *ssp)
407 /* The smp_load_acquire() pairs with the smp_store_release(). */
408 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
409 return; /* Already initialized. */
410 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
411 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
412 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
415 init_srcu_struct_fields(ssp, true);
416 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
420 * Returns approximate total of the readers' ->srcu_lock_count[] values
421 * for the rank of per-CPU counters specified by idx.
423 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
426 unsigned long sum = 0;
428 for_each_possible_cpu(cpu) {
429 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
438 * for the rank of per-CPU counters specified by idx.
440 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
443 unsigned long mask = 0;
444 unsigned long sum = 0;
446 for_each_possible_cpu(cpu) {
447 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
449 sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
450 if (IS_ENABLED(CONFIG_PROVE_RCU))
451 mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
453 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
464 unsigned long unlocks;
	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
483 * been no readers on this index at some point in this function.
484 * But there might be more readers, as a task might have read
485 * the current ->srcu_idx but not yet have incremented its CPU's
486 * ->srcu_lock_count[idx] counter. In fact, it is possible
487 * that most of the tasks have been preempted between fetching
488 * ->srcu_idx and incrementing ->srcu_lock_count[idx]. And there
489 * could be almost (ULONG_MAX / sizeof(struct task_struct)) tasks
490 * in a system whose address space was fully populated with memory.
491 * Call this quantity Nt.
493 * So suppose that the updater is preempted at this point in the
494 * code for a long time. That now-preempted updater has already
495 * flipped ->srcu_idx (possibly during the preceding grace period),
496 * done an smp_mb() (again, possibly during the preceding grace
497 * period), and summed up the ->srcu_unlock_count[idx] counters.
498 * How many times can a given one of the aforementioned Nt tasks
499 * increment the old ->srcu_idx value's ->srcu_lock_count[idx]
500 * counter, in the absence of nesting?
502 * It can clearly do so once, given that it has already fetched
503 * the old value of ->srcu_idx and is just about to use that value
504 * to index its increment of ->srcu_lock_count[idx]. But as soon as
505 * it leaves that SRCU read-side critical section, it will increment
506 * ->srcu_unlock_count[idx], which must follow the updater's above
507 * read from that same value. Thus, as soon the reading task does
508 * an smp_mb() and a later fetch from ->srcu_idx, that task will be
509 * guaranteed to get the new index. Except that the increment of
510 * ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
511 * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
512 * is before the smp_mb(). Thus, that task might not see the new
513 * value of ->srcu_idx until the -second- __srcu_read_lock(),
514 * which in turn means that this task might well increment
	 * ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
	 * not just once.
518 * However, it is important to note that a given smp_mb() takes
519 * effect not just for the task executing it, but also for any
520 * later task running on that same CPU.
522 * That is, there can be almost Nt + Nc further increments of
523 * ->srcu_lock_count[idx] for the old index, where Nc is the number
524 * of CPUs. But this is OK because the size of the task_struct
	 * structure limits the value of Nt and current systems limit Nc
	 * to a few thousand.
528 * OK, but what about nesting? This does impose a limit on
529 * nesting of half of the size of the task_struct structure
530 * (measured in bytes), which should be sufficient. A late 2022
531 * TREE01 rcutorture run reported this size to be no less than
532 * 9408 bytes, allowing up to 4704 levels of nesting, which is
533 * comfortably beyond excessive. Especially on 64-bit systems,
534 * which are unlikely to be configured with an address space fully
535 * populated with memory, at least not anytime soon.
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *			 otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
545 * Note that this is not an atomic primitive, and can therefore suffer
546 * severe errors when invoked on an active srcu_struct. That said, it
547 * can be useful as an error check at cleanup time.
549 static bool srcu_readers_active(struct srcu_struct *ssp)
552 unsigned long sum = 0;
554 for_each_possible_cpu(cpu) {
555 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
557 sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
558 sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
559 sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
567 * synchronize_srcu_expedited(). We spin for a fixed time period
568 * (defined below, boot time configurable) to allow SRCU readers to exit
569 * their read-side critical sections. If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
574 #define SRCU_DEFAULT_RETRY_CHECK_DELAY 5
576 static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
577 module_param(srcu_retry_check_delay, ulong, 0444);
579 #define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
580 #define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
582 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL // Lowmark on default per-GP-phase
583 // no-delay instances.
584 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
585 // no-delay instances.
587 #define SRCU_UL_CLAMP_LO(val, low) ((val) > (low) ? (val) : (low))
588 #define SRCU_UL_CLAMP_HI(val, high) ((val) < (high) ? (val) : (high))
589 #define SRCU_UL_CLAMP(val, low, high) SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// per-GP-phase no-delay instances adjusted to allow non-sleeping poll of up to
// one jiffy's duration.  The multiplication by 2 factors in the srcu_get_delay()
// call made from process_srcu().
593 #define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED \
594 (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
596 // Maximum per-GP-phase consecutive no-delay instances.
597 #define SRCU_DEFAULT_MAX_NODELAY_PHASE \
598 SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED, \
599 SRCU_DEFAULT_MAX_NODELAY_PHASE_LO, \
600 SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
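/*
 * Worked example of the clamping above: with HZ=1000 and
 * SRCU_DEFAULT_RETRY_CHECK_DELAY=5, the adjusted value is
 * 2 * 1000000 / 1000 / 5 = 400, which lies within [3, 1000], so
 * SRCU_DEFAULT_MAX_NODELAY_PHASE is 400.  With HZ=100 the adjusted
 * value would be 4000 and would be clamped down to 1000.
 */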
602 static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
603 module_param(srcu_max_nodelay_phase, ulong, 0444);
605 // Maximum consecutive no-delay instances.
606 #define SRCU_DEFAULT_MAX_NODELAY (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
607 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
609 static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
610 module_param(srcu_max_nodelay, ulong, 0444);
613 * Return grace-period delay, zero if there are expedited grace
614 * periods pending, SRCU_INTERVAL otherwise.
616 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
618 unsigned long gpstart;
620 unsigned long jbase = SRCU_INTERVAL;
621 struct srcu_usage *sup = ssp->srcu_sup;
623 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
625 if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
627 gpstart = READ_ONCE(sup->srcu_gp_start);
628 if (time_after(j, gpstart))
629 jbase += j - gpstart;
631 WRITE_ONCE(sup->srcu_n_exp_nodelay, READ_ONCE(sup->srcu_n_exp_nodelay) + 1);
632 if (READ_ONCE(sup->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
636 return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
640 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
641 * @ssp: structure to clean up.
643 * Must invoke this after you are finished using a given srcu_struct that
644 * was initialized via init_srcu_struct(), else you leak memory.
646 void cleanup_srcu_struct(struct srcu_struct *ssp)
649 struct srcu_usage *sup = ssp->srcu_sup;
651 if (WARN_ON(!srcu_get_delay(ssp)))
652 return; /* Just leak it! */
653 if (WARN_ON(srcu_readers_active(ssp)))
654 return; /* Just leak it! */
655 flush_delayed_work(&sup->work);
656 for_each_possible_cpu(cpu) {
657 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
659 del_timer_sync(&sdp->delay_work);
660 flush_work(&sdp->work);
661 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
662 return; /* Forgot srcu_barrier(), so just leak it! */
664 if (WARN_ON(rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
665 WARN_ON(rcu_seq_current(&sup->srcu_gp_seq) != sup->srcu_gp_seq_needed) ||
666 WARN_ON(srcu_readers_active(ssp))) {
667 pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
668 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
669 rcu_seq_current(&sup->srcu_gp_seq), sup->srcu_gp_seq_needed);
670 return; /* Caller forgot to stop doing call_srcu()? */
674 sup->srcu_size_state = SRCU_SIZE_SMALL;
675 if (!sup->sda_is_static) {
676 free_percpu(ssp->sda);
679 ssp->srcu_sup = NULL;
682 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
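/*
 * Illustrative teardown-ordering sketch (caller-side, assumed names):
 *
 *	// 1. Prevent new call_srcu() invocations on this domain.
 *	// 2. Wait for already-queued callbacks to be invoked:
 *	srcu_barrier(&my_srcu);
 *	// 3. Only then deconstruct the domain:
 *	cleanup_srcu_struct(&my_srcu);
 *
 * Skipping step 2 trips the rcu_segcblist_n_cbs() warning above and
 * leaks the srcu_struct.
 */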
684 #ifdef CONFIG_PROVE_RCU
686 * Check for consistent NMI safety.
688 void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
690 int nmi_safe_mask = 1 << nmi_safe;
691 int old_nmi_safe_mask;
692 struct srcu_data *sdp;
694 /* NMI-unsafe use in NMI is a bad sign */
695 WARN_ON_ONCE(!nmi_safe && in_nmi());
696 sdp = raw_cpu_ptr(ssp->sda);
697 old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
698 if (!old_nmi_safe_mask) {
699 WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
702 WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
704 EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
705 #endif /* CONFIG_PROVE_RCU */
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
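/*
 * Illustrative read-side sketch using the public wrappers that funnel into
 * __srcu_read_lock()/__srcu_read_unlock() (assumed caller-side names):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(shared_ptr, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may block, unlike plain RCU readers
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index selects which ->srcu_lock_count[]/->srcu_unlock_count[]
 * rank the matching srcu_read_unlock() is counted against.
 */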
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
735 #ifdef CONFIG_NEED_SRCU_NMI_SAFE
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
768 #endif // CONFIG_NEED_SRCU_NMI_SAFE
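/*
 * Illustrative sketch: an NMI handler may use the NMI-safe read-side
 * variants, but a given srcu_struct must then use them consistently
 * (enforced by srcu_check_nmi_safety() under CONFIG_PROVE_RCU).
 * Assumed caller-side names:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock_nmisafe(&my_srcu);
 *	// ... NMI-context read-side access ...
 *	srcu_read_unlock_nmisafe(&my_srcu, idx);
 */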
771 * Start an SRCU grace period.
773 static void srcu_gp_start(struct srcu_struct *ssp)
777 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
778 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
779 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
780 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
781 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
782 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
783 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
784 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
788 static void srcu_delay_timer(struct timer_list *t)
790 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
792 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
795 static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
799 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
803 timer_reduce(&sdp->delay_work, jiffies + delay);
807 * Schedule callback invocation for the specified srcu_data structure,
808 * if possible, on the corresponding CPU.
810 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
812 srcu_queue_delayed_work_on(sdp, delay);
816 * Schedule callback invocation for all srcu_data structures associated
817 * with the specified srcu_node structure that have callbacks for the
818 * just-completed grace period, the one corresponding to idx. If possible,
819 * schedule this invocation on the corresponding CPUs.
821 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
822 unsigned long mask, unsigned long delay)
826 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
827 if (!(mask & (1UL << (cpu - snp->grplo))))
829 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
834 * Note the end of an SRCU grace period. Initiates callback invocation
835 * and starts a new grace period if needed.
837 * The ->srcu_cb_mutex acquisition does not protect any data, but
838 * instead prevents more than one grace period from starting while we
839 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
840 * array to have a finite number of elements.
842 static void srcu_gp_end(struct srcu_struct *ssp)
844 unsigned long cbdelay = 1;
852 struct srcu_data *sdp;
854 struct srcu_node *snp;
856 struct srcu_usage *sup = ssp->srcu_sup;
858 /* Prevent more than one additional grace period. */
859 mutex_lock(&sup->srcu_cb_mutex);
861 /* End the current grace period. */
862 spin_lock_irq_rcu_node(sup);
863 idx = rcu_seq_state(sup->srcu_gp_seq);
864 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
865 if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
868 WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
869 rcu_seq_end(&sup->srcu_gp_seq);
870 gpseq = rcu_seq_current(&sup->srcu_gp_seq);
871 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, gpseq))
872 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, gpseq);
873 spin_unlock_irq_rcu_node(sup);
874 mutex_unlock(&sup->srcu_gp_mutex);
875 /* A new grace period can start at this point. But only one. */
877 /* Initiate callback invocation as needed. */
878 ss_state = smp_load_acquire(&sup->srcu_size_state);
879 if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
880 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
883 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
884 srcu_for_each_node_breadth_first(ssp, snp) {
885 spin_lock_irq_rcu_node(snp);
887 last_lvl = snp >= sup->level[rcu_num_lvls - 1];
889 cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
890 snp->srcu_have_cbs[idx] = gpseq;
891 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
892 sgsne = snp->srcu_gp_seq_needed_exp;
893 if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
894 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
895 if (ss_state < SRCU_SIZE_BIG)
898 mask = snp->srcu_data_have_cbs[idx];
899 snp->srcu_data_have_cbs[idx] = 0;
900 spin_unlock_irq_rcu_node(snp);
902 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
906 /* Occasionally prevent srcu_data counter wrap. */
907 if (!(gpseq & counter_wrap_check))
908 for_each_possible_cpu(cpu) {
909 sdp = per_cpu_ptr(ssp->sda, cpu);
910 spin_lock_irqsave_rcu_node(sdp, flags);
911 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
912 sdp->srcu_gp_seq_needed = gpseq;
913 if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
914 sdp->srcu_gp_seq_needed_exp = gpseq;
915 spin_unlock_irqrestore_rcu_node(sdp, flags);
918 /* Callback initiation done, allow grace periods after next. */
919 mutex_unlock(&sup->srcu_cb_mutex);
921 /* Start a new grace period if needed. */
922 spin_lock_irq_rcu_node(sup);
923 gpseq = rcu_seq_current(&sup->srcu_gp_seq);
924 if (!rcu_seq_state(gpseq) &&
925 ULONG_CMP_LT(gpseq, sup->srcu_gp_seq_needed)) {
927 spin_unlock_irq_rcu_node(sup);
928 srcu_reschedule(ssp, 0);
930 spin_unlock_irq_rcu_node(sup);
933 /* Transition to big if needed. */
934 if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
935 if (ss_state == SRCU_SIZE_ALLOC)
936 init_srcu_struct_nodes(ssp, GFP_KERNEL);
938 smp_store_release(&sup->srcu_size_state, ss_state + 1);
943 * Funnel-locking scheme to scalably mediate many concurrent expedited
944 * grace-period requests. This function is invoked for the first known
945 * expedited request for a grace period that has already been requested,
946 * but without expediting. To start a completely new grace period,
947 * whether expedited or not, use srcu_funnel_gp_start() instead.
949 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
956 for (; snp != NULL; snp = snp->srcu_parent) {
957 sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
958 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
959 (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
961 spin_lock_irqsave_rcu_node(snp, flags);
962 sgsne = snp->srcu_gp_seq_needed_exp;
963 if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
964 spin_unlock_irqrestore_rcu_node(snp, flags);
967 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
968 spin_unlock_irqrestore_rcu_node(snp, flags);
970 spin_lock_irqsave_ssp_contention(ssp, &flags);
971 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
972 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
973 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
977 * Funnel-locking scheme to scalably mediate many concurrent grace-period
978 * requests. The winner has to do the work of actually starting grace
979 * period s. Losers must either ensure that their desired grace-period
980 * number is recorded on at least their leaf srcu_node structure, or they
981 * must take steps to invoke their own callbacks.
983 * Note that this function also does the work of srcu_funnel_exp_start(),
984 * in some cases by directly invoking it.
 * The srcu read lock should be held around this function, and s is
 * a grace-period sequence snapshot taken after that lock is acquired.
 */
989 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
990 unsigned long s, bool do_norm)
993 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
995 struct srcu_node *snp;
996 struct srcu_node *snp_leaf;
997 unsigned long snp_seq;
998 struct srcu_usage *sup = ssp->srcu_sup;
1000 /* Ensure that snp node tree is fully initialized before traversing it */
1001 if (smp_load_acquire(&sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1004 snp_leaf = sdp->mynode;
1007 /* Each pass through the loop does one level of the srcu_node tree. */
1008 for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
1009 if (WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) && snp != snp_leaf)
1010 return; /* GP already done and CBs recorded. */
1011 spin_lock_irqsave_rcu_node(snp, flags);
1012 snp_seq = snp->srcu_have_cbs[idx];
1013 if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
1014 if (snp == snp_leaf && snp_seq == s)
1015 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1016 spin_unlock_irqrestore_rcu_node(snp, flags);
1017 if (snp == snp_leaf && snp_seq != s) {
1018 srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
1022 srcu_funnel_exp_start(ssp, snp, s);
1025 snp->srcu_have_cbs[idx] = s;
1026 if (snp == snp_leaf)
1027 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
1028 sgsne = snp->srcu_gp_seq_needed_exp;
1029 if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
1030 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
1031 spin_unlock_irqrestore_rcu_node(snp, flags);
1034 /* Top of tree, must ensure the grace period will be started. */
1035 spin_lock_irqsave_ssp_contention(ssp, &flags);
1036 if (ULONG_CMP_LT(sup->srcu_gp_seq_needed, s)) {
1038 * Record need for grace period s. Pair with load
1039 * acquire setting up for initialization.
1041 smp_store_release(&sup->srcu_gp_seq_needed, s); /*^^^*/
1043 if (!do_norm && ULONG_CMP_LT(sup->srcu_gp_seq_needed_exp, s))
1044 WRITE_ONCE(sup->srcu_gp_seq_needed_exp, s);
1046 /* If grace period not already in progress, start it. */
1047 if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
1048 rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
1049 WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
1052 // And how can that list_add() in the "else" clause
1053 // possibly be safe for concurrent execution? Well,
1054 // it isn't. And it does not have to be. After all, it
1055 // can only be executed during early boot when there is only
1056 // the one boot CPU running with interrupts still disabled.
1057 if (likely(srcu_init_done))
1058 queue_delayed_work(rcu_gp_wq, &sup->work,
1059 !!srcu_get_delay(ssp));
1060 else if (list_empty(&sup->work.work.entry))
1061 list_add(&sup->work.work.entry, &srcu_boot_list);
1063 spin_unlock_irqrestore_rcu_node(sup, flags);
1067 * Wait until all readers counted by array index idx complete, but
1068 * loop an additional time if there is an expedited grace period pending.
1069 * The caller must ensure that ->srcu_idx is not changed while checking.
1071 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1073 unsigned long curdelay;
1075 curdelay = !srcu_get_delay(ssp);
1078 if (srcu_readers_active_idx_check(ssp, idx))
1080 if ((--trycount + curdelay) <= 0)
1082 udelay(srcu_retry_check_delay);
1087 * Increment the ->srcu_idx counter so that future SRCU readers will
1088 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
1089 * us to wait for pre-existing readers in a starvation-free manner.
1091 static void srcu_flip(struct srcu_struct *ssp)
1094 * Because the flip of ->srcu_idx is executed only if the
1095 * preceding call to srcu_readers_active_idx_check() found that
1096 * the ->srcu_unlock_count[] and ->srcu_lock_count[] sums matched
1097 * and because that summing uses atomic_long_read(), there is
1098 * ordering due to a control dependency between that summing and
1099 * the WRITE_ONCE() in this call to srcu_flip(). This ordering
1100 * ensures that if this updater saw a given reader's increment from
1101 * __srcu_read_lock(), that reader was using a value of ->srcu_idx
1102 * from before the previous call to srcu_flip(), which should be
1103 * quite rare. This ordering thus helps forward progress because
1104 * the grace period could otherwise be delayed by additional
1105 * calls to __srcu_read_lock() using that old (soon to be new)
1106 * value of ->srcu_idx.
1108 * This sum-equality check and ordering also ensures that if
1109 * a given call to __srcu_read_lock() uses the new value of
1110 * ->srcu_idx, this updater's earlier scans cannot have seen
1111 * that reader's increments, which is all to the good, because
1112 * this grace period need not wait on that reader. After all,
1113 * if those earlier scans had seen that reader, there would have
1114 * been a sum mismatch and this code would not be reached.
1116 * This means that the following smp_mb() is redundant, but
1117 * it stays until either (1) Compilers learn about this sort of
1118 * control dependency or (2) Some production workload running on
1119 * a production system is unduly delayed by this slowpath smp_mb().
1121 smp_mb(); /* E */ /* Pairs with B and C. */
1123 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.
1126 * Ensure that if the updater misses an __srcu_read_unlock()
1127 * increment, that task's __srcu_read_lock() following its next
1128 * __srcu_read_lock() or __srcu_read_unlock() will see the above
1129 * counter update. Note that both this memory barrier and the
1130 * one in srcu_readers_active_idx_check() provide the guarantee
1131 * for __srcu_read_lock().
1133 smp_mb(); /* D */ /* Pairs with C. */
1137 * If SRCU is likely idle, return true, otherwise return false.
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
1143 * Note also that if any CPU (including the current one) is still invoking
1144 * callbacks, this function will nevertheless say "idle". This is not
1145 * ideal, but the overhead of checking all CPUs' callback lists is even
1146 * less ideal, especially on large systems. Furthermore, the wakeup
1147 * can happen before the callback is fully removed, so we have no choice
1148 * but to accept this type of error.
1150 * This function is also subject to counter-wrap errors, but let's face
1151 * it, if this function was preempted for enough time for the counters
1152 * to wrap, it really doesn't matter whether or not we expedite the grace
1153 * period. The extra overhead of a needlessly expedited grace period is
1154 * negligible when amortized over that time period, and the extra latency
1155 * of a needlessly non-expedited grace period is similarly negligible.
1157 static bool srcu_might_be_idle(struct srcu_struct *ssp)
1159 unsigned long curseq;
1160 unsigned long flags;
1161 struct srcu_data *sdp;
1163 unsigned long tlast;
1165 check_init_srcu_struct(ssp);
1166 /* If the local srcu_data structure has callbacks, not idle. */
1167 sdp = raw_cpu_ptr(ssp->sda);
1168 spin_lock_irqsave_rcu_node(sdp, flags);
1169 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
1170 spin_unlock_irqrestore_rcu_node(sdp, flags);
1171 return false; /* Callbacks already present, so not idle. */
1173 spin_unlock_irqrestore_rcu_node(sdp, flags);
1176 * No local callbacks, so probabilistically probe global state.
1177 * Exact information would require acquiring locks, which would
1178 * kill scalability, hence the probabilistic nature of the probe.
1181 /* First, see if enough time has passed since the last GP. */
1182 t = ktime_get_mono_fast_ns();
1183 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
1184 if (exp_holdoff == 0 ||
1185 time_in_range_open(t, tlast, tlast + exp_holdoff))
1186 return false; /* Too soon after last GP. */
1188 /* Next, check for probable idleness. */
1189 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1190 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
1191 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
1192 return false; /* Grace period in progress, so not idle. */
1193 smp_mb(); /* Order ->srcu_gp_seq with prior access. */
1194 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
1195 return false; /* GP # changed, so not idle. */
1196 return true; /* With reasonable probability, idle! */
1200 * SRCU callback function to leak a callback.
1202 static void srcu_leak_callback(struct rcu_head *rhp)
1207 * Start an SRCU grace period, and also queue the callback if non-NULL.
1209 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
1210 struct rcu_head *rhp, bool do_norm)
1212 unsigned long flags;
1214 bool needexp = false;
1215 bool needgp = false;
1217 struct srcu_data *sdp;
1218 struct srcu_node *sdp_mynode;
1221 check_init_srcu_struct(ssp);
1223 * While starting a new grace period, make sure we are in an
1224 * SRCU read-side critical section so that the grace-period
1225 * sequence number cannot wrap around in the meantime.
1227 idx = __srcu_read_lock_nmisafe(ssp);
1228 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
1229 if (ss_state < SRCU_SIZE_WAIT_CALL)
1230 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
1232 sdp = raw_cpu_ptr(ssp->sda);
1233 spin_lock_irqsave_sdp_contention(sdp, &flags);
1235 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
1237 * The snapshot for acceleration must be taken _before_ the read of the
1238 * current gp sequence used for advancing, otherwise advancing may fail
1239 * and acceleration may then fail too.
1241 * This could happen if:
1243 * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
1244 * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
1246 * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
1247 * completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
1249 * 3) This value is passed to rcu_segcblist_advance() which can't move
1250 * any segment forward and fails.
1252 * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
1253 * But then the call to rcu_seq_snap() observes the grace period for the
1254 * RCU_WAIT_TAIL segment as completed and the subsequent one for the
1255 * RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
1256 * so it returns a snapshot of the next grace period, which is X + 12.
1258 * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
1259 * freshly enqueued callback in RCU_NEXT_TAIL can't move to
1260 * RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
1261 * period (gp_num = X + 8). So acceleration fails.
1263 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1265 rcu_segcblist_advance(&sdp->srcu_cblist,
1266 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1267 WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
1269 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
1270 sdp->srcu_gp_seq_needed = s;
1273 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
1274 sdp->srcu_gp_seq_needed_exp = s;
1277 spin_unlock_irqrestore_rcu_node(sdp, flags);
1279 /* Ensure that snp node tree is fully initialized before traversing it */
1280 if (ss_state < SRCU_SIZE_WAIT_BARRIER)
1283 sdp_mynode = sdp->mynode;
1286 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
1288 srcu_funnel_exp_start(ssp, sdp_mynode, s);
1289 __srcu_read_unlock_nmisafe(ssp, idx);
1294 * Enqueue an SRCU callback on the srcu_data structure associated with
1295 * the current CPU and the specified srcu_struct structure, initiating
1296 * grace-period processing if it is not already running.
1298 * Note that all CPUs must agree that the grace period extended beyond
1299 * all pre-existing SRCU read-side critical section. On systems with
1300 * more than one CPU, this means that when "func()" is invoked, each CPU
1301 * is guaranteed to have executed a full memory barrier since the end of
1302 * its last corresponding SRCU read-side critical section whose beginning
1303 * preceded the call to call_srcu(). It also means that each CPU executing
1304 * an SRCU read-side critical section that continues beyond the start of
1305 * "func()" must have executed a memory barrier after the call_srcu()
1306 * but before the beginning of that SRCU read-side critical section.
1307 * Note that these guarantees include CPUs that are offline, idle, or
1308 * executing in user mode, as well as CPUs that are executing in the kernel.
1310 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
1311 * resulting SRCU callback function "func()", then both CPU A and CPU
1312 * B are guaranteed to execute a full memory barrier during the time
1313 * interval between the call to call_srcu() and the invocation of "func()".
1314 * This guarantee applies even if CPU A and CPU B are the same CPU (but
1315 * again only if the system has more than one CPU).
1317 * Of course, these guarantees apply only for invocations of call_srcu(),
1318 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
1319 * srcu_struct structure.
1321 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1322 rcu_callback_t func, bool do_norm)
1324 if (debug_rcu_head_queue(rhp)) {
1325 /* Probable double call_srcu(), so leak the callback. */
1326 WRITE_ONCE(rhp->func, srcu_leak_callback);
1327 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
1331 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
1335 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
1337 * @rhp: structure to be used for queueing the SRCU callback.
1338 * @func: function to be invoked after the SRCU grace period
1340 * The callback function will be invoked some time after a full SRCU
1341 * grace period elapses, in other words after all pre-existing SRCU
1342 * read-side critical sections have completed. However, the callback
1343 * function might well execute concurrently with other SRCU read-side
1344 * critical sections that started after call_srcu() was invoked. SRCU
1345 * read-side critical sections are delimited by srcu_read_lock() and
1346 * srcu_read_unlock(), and may be nested.
1348 * The callback will be invoked from process context, but must nevertheless
1349 * be fast and must not block.
1351 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1352 rcu_callback_t func)
1354 __call_srcu(ssp, rhp, func, true);
1356 EXPORT_SYMBOL_GPL(call_srcu);
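/*
 * Illustrative call_srcu() usage sketch (the types and names below are
 * assumed caller-side examples, not part of this file):
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_obj_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// After unpublishing obj from all SRCU-protected pointers:
 *	call_srcu(&my_srcu, &obj->rh, my_obj_free_cb);
 */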
1359 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
1361 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1363 struct rcu_synchronize rcu;
1365 srcu_lock_sync(&ssp->dep_map);
1367 RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1368 lock_is_held(&rcu_bh_lock_map) ||
1369 lock_is_held(&rcu_lock_map) ||
1370 lock_is_held(&rcu_sched_lock_map),
1371 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
1373 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
1376 check_init_srcu_struct(ssp);
1377 init_completion(&rcu.completion);
1378 init_rcu_head_on_stack(&rcu.head);
1379 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1380 wait_for_completion(&rcu.completion);
1381 destroy_rcu_head_on_stack(&rcu.head);
1384 * Make sure that later code is ordered after the SRCU grace
1385 * period. This pairs with the spin_lock_irq_rcu_node()
1386 * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
1387 * because the current CPU might have been totally uninvolved with
1388 * (and thus unordered against) that grace period.
1394 * synchronize_srcu_expedited - Brute-force SRCU grace period
1395 * @ssp: srcu_struct with which to synchronize.
1397 * Wait for an SRCU grace period to elapse, but be more aggressive about
1398 * spinning rather than blocking when waiting.
1400 * Note that synchronize_srcu_expedited() has the same deadlock and
1401 * memory-ordering properties as does synchronize_srcu().
1403 void synchronize_srcu_expedited(struct srcu_struct *ssp)
1405 __synchronize_srcu(ssp, rcu_gp_is_normal());
1407 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
1410 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
1411 * @ssp: srcu_struct with which to synchronize.
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count
 * of index=((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
 * and waits for the count of the other index to drain.
1418 * Can block; must be called from process context.
1420 * Note that it is illegal to call synchronize_srcu() from the corresponding
1421 * SRCU read-side critical section; doing so will result in deadlock.
1422 * However, it is perfectly legal to call synchronize_srcu() on one
1423 * srcu_struct from some other srcu_struct's read-side critical section,
1424 * as long as the resulting graph of srcu_structs is acyclic.
1426 * There are memory-ordering constraints implied by synchronize_srcu().
1427 * On systems with more than one CPU, when synchronize_srcu() returns,
1428 * each CPU is guaranteed to have executed a full memory barrier since
1429 * the end of its last corresponding SRCU read-side critical section
1430 * whose beginning preceded the call to synchronize_srcu(). In addition,
1431 * each CPU having an SRCU read-side critical section that extends beyond
1432 * the return from synchronize_srcu() is guaranteed to have executed a
1433 * full memory barrier after the beginning of synchronize_srcu() and before
1434 * the beginning of that SRCU read-side critical section. Note that these
1435 * guarantees include CPUs that are offline, idle, or executing in user mode,
1436 * as well as CPUs that are executing in the kernel.
1438 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
1439 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
1440 * to have executed a full memory barrier during the execution of
1441 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
1442 * are the same CPU, but again only if the system has more than one CPU.
1444 * Of course, these memory-ordering guarantees apply only when
1445 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1446 * passed the same srcu_struct structure.
1448 * Implementation of these memory-ordering guarantees is similar to
1449 * that of synchronize_rcu().
1451 * If SRCU is likely idle, expedite the first request. This semantic
1452 * was provided by Classic SRCU, and is relied upon by its users, so TREE
1453 * SRCU must also provide it. Note that detecting idleness is heuristic
1454 * and subject to both false positives and negatives.
1456 void synchronize_srcu(struct srcu_struct *ssp)
1458 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1459 synchronize_srcu_expedited(ssp);
1461 __synchronize_srcu(ssp, true);
1463 EXPORT_SYMBOL_GPL(synchronize_srcu);
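/*
 * Illustrative updater sketch pairing synchronize_srcu() with reader-visible
 * pointer replacement (assumed caller-side names):
 *
 *	old = rcu_dereference_protected(shared_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(shared_ptr, new);
 *	synchronize_srcu(&my_srcu);	// wait out pre-existing readers
 *	kfree(old);			// no reader can still reference old
 */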
1466 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1467 * @ssp: srcu_struct to provide cookie for.
1469 * This function returns a cookie that can be passed to
1470 * poll_state_synchronize_srcu(), which will return true if a full grace
1471 * period has elapsed in the meantime. It is the caller's responsibility
1472 * to make sure that grace period happens, for example, by invoking
1473 * call_srcu() after return from get_state_synchronize_srcu().
1475 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1477 // Any prior manipulation of SRCU-protected data must happen
1478 // before the load from ->srcu_gp_seq.
1480 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1482 EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1485 * start_poll_synchronize_srcu - Provide cookie and start grace period
1486 * @ssp: srcu_struct to provide cookie for.
1488 * This function returns a cookie that can be passed to
1489 * poll_state_synchronize_srcu(), which will return true if a full grace
1490 * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
1491 * this function also ensures that any needed SRCU grace period will be
1492 * started. This convenience does come at a cost in terms of CPU overhead.
1494 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1496 return srcu_gp_start_if_needed(ssp, NULL, true);
1498 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1501 * poll_state_synchronize_srcu - Has cookie's grace period ended?
1502 * @ssp: srcu_struct to provide cookie for.
1503 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
1505 * This function takes the cookie that was returned from either
1506 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
1507 * returns @true if an SRCU grace period elapsed since the time that the
1508 * cookie was created.
1510 * Because cookies are finite in size, wrapping/overflow is possible.
1511 * This is more pronounced on 32-bit systems where cookies are 32 bits,
1512 * where in theory wrapping could happen in about 14 hours assuming
1513 * 25-microsecond expedited SRCU grace periods. However, a more likely
1514 * overflow lower bound is on the order of 24 days in the case of
1515 * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
1516 * system requires geologic timespans, as in more than seven million years
1517 * even for expedited SRCU grace periods.
1519 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
1520 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses
1521 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
1522 * few minutes. If this proves to be a problem, this counter will be
1523 * expanded to the same size as for Tree SRCU.
1525 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1527 if (!rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
1529 // Ensure that the end of the SRCU grace period happens before
1530 // any subsequent code that the caller might execute.
1534 EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
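/*
 * Illustrative polled-grace-period sketch (assumed caller-side names):
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);	// also starts a GP
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		;	// a full grace period has elapsed since the cookie was taken
 *	else
 *		;	// not yet; retry later or fall back to synchronize_srcu()
 */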
1537 * Callback function for srcu_barrier() use.
1539 static void srcu_barrier_cb(struct rcu_head *rhp)
1541 struct srcu_data *sdp;
1542 struct srcu_struct *ssp;
1544 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1546 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1547 complete(&ssp->srcu_sup->srcu_barrier_completion);
/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least one
 * callback enqueued.  Note that if a CPU already has callbacks enqueued,
 * it must have already registered the need for a future grace period,
 * so all we need do is enqueue a callback that will use the same grace
 * period as the last callback already in the queue.
 */
1558 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1560 spin_lock_irq_rcu_node(sdp);
1561 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1562 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1563 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1564 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1565 &sdp->srcu_barrier_head)) {
1566 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1567 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1569 spin_unlock_irq_rcu_node(sdp);
1573 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1574 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1576 void srcu_barrier(struct srcu_struct *ssp)
1580 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);
1582 check_init_srcu_struct(ssp);
1583 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
1584 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
1585 smp_mb(); /* Force ordering following return. */
1586 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1587 return; /* Someone else did our work for us. */
1589 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
1590 init_completion(&ssp->srcu_sup->srcu_barrier_completion);
1592 /* Initial count prevents reaching zero until all CBs are posted. */
1593 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);
1595 idx = __srcu_read_lock_nmisafe(ssp);
1596 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1597 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
1599 for_each_possible_cpu(cpu)
1600 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1601 __srcu_read_unlock_nmisafe(ssp, idx);
1603 /* Remove the initial count, at which point reaching zero can happen. */
1604 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1605 complete(&ssp->srcu_sup->srcu_barrier_completion);
1606 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);
1608 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
1609 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1611 EXPORT_SYMBOL_GPL(srcu_barrier);
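/*
 * Illustrative sketch, not part of this file: a typical srcu_barrier()
 * caller posts callbacks via call_srcu() and then, before tearing down
 * whatever those callbacks touch (at module unload, for example), waits
 * for any still-queued callbacks to be invoked.  Everything here other
 * than the SRCU APIs themselves (barrier_example_srcu, struct
 * barrier_example_node, and the helper functions) is hypothetical.
 */
DEFINE_STATIC_SRCU(barrier_example_srcu);

struct barrier_example_node {
	struct rcu_head rh;
	int payload;
};

static void barrier_example_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct barrier_example_node, rh));
}

/* Asynchronously free a node once a grace period has elapsed. */
static void barrier_example_defer_free(struct barrier_example_node *np)
{
	call_srcu(&barrier_example_srcu, &np->rh, barrier_example_free_cb);
}

/* Teardown: wait until every queued barrier_example_free_cb() has run. */
static void barrier_example_shutdown(void)
{
	srcu_barrier(&barrier_example_srcu);
}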
1614 * srcu_batches_completed - return batches completed.
1615 * @ssp: srcu_struct on which to report batch completion.
1617 * Report the number of batches, correlated with, but not necessarily
1618 * precisely the same as, the number of grace periods that have elapsed.
1620 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1622 return READ_ONCE(ssp->srcu_idx);
1624 EXPORT_SYMBOL_GPL(srcu_batches_completed);
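/*
 * Illustrative sketch, not part of this file: srcu_batches_completed() is
 * primarily a diagnostic hook (rcutorture is the main in-tree user), so a
 * plausible use is a coarse forward-progress check against an earlier
 * snapshot.  The name progress_example_srcu and the helper below are
 * hypothetical.
 */
DEFINE_STATIC_SRCU(progress_example_srcu);

/* Return true if at least one batch has completed since @snap was taken. */
static bool progress_example_advanced(unsigned long snap)
{
	return srcu_batches_completed(&progress_example_srcu) != snap;
}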
1627 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1628 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1629 * completed in that state.
1631 static void srcu_advance_state(struct srcu_struct *ssp)
1635 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
1638 * Because readers might be delayed for an extended period after
1639 * fetching ->srcu_idx for their index, at any point in time there
1640 * might well be readers using both idx=0 and idx=1. We therefore
1641 * need to wait for readers to clear from both index values before
1642 * invoking a callback.
1644 * The load-acquire ensures that we see the accesses performed
1645 * by the prior grace period.
1647 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
1648 if (idx == SRCU_STATE_IDLE) {
1649 spin_lock_irq_rcu_node(ssp->srcu_sup);
1650 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1651 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
1652 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1653 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1656 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
1657 if (idx == SRCU_STATE_IDLE)
1659 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1660 if (idx != SRCU_STATE_IDLE) {
1661 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1662 return; /* Someone else started the grace period. */
1666 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1667 idx = 1 ^ (ssp->srcu_idx & 1);
1668 if (!try_check_zero(ssp, idx, 1)) {
1669 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1670 return; /* readers present, retry later. */
1673 spin_lock_irq_rcu_node(ssp->srcu_sup);
1674 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
1675 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1676 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1679 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1682 * SRCU read-side critical sections are normally short,
1683 * so check at least twice in quick succession after a flip.
1685 idx = 1 ^ (ssp->srcu_idx & 1);
1686 if (!try_check_zero(ssp, idx, 2)) {
1687 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1688 return; /* readers present, retry later. */
1690 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1691 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1696 * Invoke a limited number of SRCU callbacks that have passed through
1697 * their grace period. If there are more to do, SRCU will reschedule
1698 * the workqueue. Note that needed memory barriers have been executed
1699 * in this task's context by srcu_readers_active_idx_check().
1701 static void srcu_invoke_callbacks(struct work_struct *work)
1705 struct rcu_cblist ready_cbs;
1706 struct rcu_head *rhp;
1707 struct srcu_data *sdp;
1708 struct srcu_struct *ssp;
1710 sdp = container_of(work, struct srcu_data, work);
1713 rcu_cblist_init(&ready_cbs);
1714 spin_lock_irq_rcu_node(sdp);
1715 WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
1716 rcu_segcblist_advance(&sdp->srcu_cblist,
1717 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1719 * Although this function is theoretically re-entrant, concurrent callback
1720 * invocation is disallowed to avoid executing an SRCU barrier too early.
1723 if (sdp->srcu_cblist_invoking ||
1724 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1725 spin_unlock_irq_rcu_node(sdp);
1726 return; /* Someone else on the job or nothing to do. */
1729 /* We are on the job! Extract and invoke ready callbacks. */
1730 sdp->srcu_cblist_invoking = true;
1731 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1732 len = ready_cbs.len;
1733 spin_unlock_irq_rcu_node(sdp);
1734 rhp = rcu_cblist_dequeue(&ready_cbs);
1735 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1736 debug_rcu_head_unqueue(rhp);
1737 debug_rcu_head_callback(rhp);
1742 WARN_ON_ONCE(ready_cbs.len);
1745 * Update counts, accelerate new callbacks, and if needed,
1746 * schedule another round of callback invocation.
1748 spin_lock_irq_rcu_node(sdp);
1749 rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
1750 sdp->srcu_cblist_invoking = false;
1751 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1752 spin_unlock_irq_rcu_node(sdp);
1753 /* An SRCU barrier or callbacks from a previous round of work are still pending. */
1755 srcu_schedule_cbs_sdp(sdp, 0);
1759 * Finished one round of SRCU grace-period processing. Start another if there
1760 * are more SRCU callbacks queued, otherwise put SRCU into not-running state.
1762 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1766 spin_lock_irq_rcu_node(ssp->srcu_sup);
1767 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1768 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
1769 /* All requests fulfilled, time to go idle. */
1772 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
1773 /* Outstanding request and no GP. Start one. */
1776 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1779 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
1783 * This is the work-queue function that handles SRCU grace periods.
1785 static void process_srcu(struct work_struct *work)
1787 unsigned long curdelay;
1789 struct srcu_struct *ssp;
1790 struct srcu_usage *sup;
1792 sup = container_of(work, struct srcu_usage, work.work);
1793 ssp = sup->srcu_ssp;
1795 srcu_advance_state(ssp);
1796 curdelay = srcu_get_delay(ssp);
1798 WRITE_ONCE(sup->reschedule_count, 0);
1801 if (READ_ONCE(sup->reschedule_jiffies) == j) {
1802 WRITE_ONCE(sup->reschedule_count, READ_ONCE(sup->reschedule_count) + 1);
1803 if (READ_ONCE(sup->reschedule_count) > srcu_max_nodelay)
1806 WRITE_ONCE(sup->reschedule_count, 1);
1807 WRITE_ONCE(sup->reschedule_jiffies, j);
1810 srcu_reschedule(ssp, curdelay);
1813 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1814 struct srcu_struct *ssp, int *flags,
1815 unsigned long *gp_seq)
1817 if (test_type != SRCU_FLAVOR)
1820 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1822 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1824 static const char * const srcu_size_state_name[] = {
1827 "SRCU_SIZE_WAIT_BARRIER",
1828 "SRCU_SIZE_WAIT_CALL",
1829 "SRCU_SIZE_WAIT_CBS1",
1830 "SRCU_SIZE_WAIT_CBS2",
1831 "SRCU_SIZE_WAIT_CBS3",
1832 "SRCU_SIZE_WAIT_CBS4",
1837 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1841 unsigned long s0 = 0, s1 = 0;
1842 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
1843 int ss_state_idx = ss_state;
1845 idx = ssp->srcu_idx & 0x1;
1846 if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
1847 ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
1848 pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
1849 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
1850 srcu_size_state_name[ss_state_idx]);
1852 // Called after cleanup_srcu_struct(), perhaps.
1853 pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
1855 pr_cont(" per-CPU(idx=%d):", idx);
1856 for_each_possible_cpu(cpu) {
1857 unsigned long l0, l1;
1858 unsigned long u0, u1;
1860 struct srcu_data *sdp;
1862 sdp = per_cpu_ptr(ssp->sda, cpu);
1863 u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
1864 u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));
1867 * Make sure that a lock is always counted if the corresponding
1868 * unlock is counted.
1872 l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
1873 l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));
1877 pr_cont(" %d(%ld,%ld %c)",
1879 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1883 pr_cont(" T(%ld,%ld)\n", s0, s1);
1885 if (SRCU_SIZING_IS_TORTURE())
1886 srcu_transition_to_big(ssp);
1888 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1890 static int __init srcu_bootup_announce(void)
1892 pr_info("Hierarchical SRCU implementation.\n");
1893 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1894 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1895 if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
1896 pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
1897 if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
1898 pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
1899 pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
1902 early_initcall(srcu_bootup_announce);
1904 void __init srcu_init(void)
1906 struct srcu_usage *sup;
1908 /* Decide on srcu_struct-size strategy. */
1909 if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
1910 if (nr_cpu_ids >= big_cpu_lim) {
1911 convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
1912 pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
1914 convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
1915 pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
1920 * Once srcu_init_done is set, call_srcu() can follow the normal path and
1921 * queue delayed work. This must follow the creation of the RCU workqueues
1922 * and the initialization of timers.
1924 srcu_init_done = true;
1925 while (!list_empty(&srcu_boot_list)) {
1926 sup = list_first_entry(&srcu_boot_list, struct srcu_usage,
1928 list_del_init(&sup->work.work.entry);
1929 if (SRCU_SIZING_IS(SRCU_SIZING_INIT) &&
1930 sup->srcu_size_state == SRCU_SIZE_SMALL)
1931 sup->srcu_size_state = SRCU_SIZE_ALLOC;
1932 queue_work(rcu_gp_wq, &sup->work.work);
1936 #ifdef CONFIG_MODULES
1938 /* Initialize any global-scope srcu_struct structures used by this module. */
1939 static int srcu_module_coming(struct module *mod)
1942 struct srcu_struct *ssp;
1943 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1945 for (i = 0; i < mod->num_srcu_structs; i++) {
1947 ssp->sda = alloc_percpu(struct srcu_data);
1948 if (WARN_ON_ONCE(!ssp->sda))
1954 /* Clean up any global-scope srcu_struct structures used by this module. */
1955 static void srcu_module_going(struct module *mod)
1958 struct srcu_struct *ssp;
1959 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1961 for (i = 0; i < mod->num_srcu_structs; i++) {
1963 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
1964 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
1965 cleanup_srcu_struct(ssp);
1966 if (!WARN_ON(srcu_readers_active(ssp)))
1967 free_percpu(ssp->sda);
1971 /* Handle one module, either coming or going. */
1972 static int srcu_module_notify(struct notifier_block *self,
1973 unsigned long val, void *data)
1975 struct module *mod = data;
1979 case MODULE_STATE_COMING:
1980 ret = srcu_module_coming(mod);
1982 case MODULE_STATE_GOING:
1983 srcu_module_going(mod);
1991 static struct notifier_block srcu_module_nb = {
1992 .notifier_call = srcu_module_notify,
1996 static __init int init_srcu_module_notifier(void)
2000 ret = register_module_notifier(&srcu_module_nb);
2002 pr_warn("Failed to register srcu module notifier\n");
2005 late_initcall(init_srcu_module_notifier);
2007 #endif /* #ifdef CONFIG_MODULES */
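/*
 * Illustrative sketch, not part of this file: the module notifier above
 * lets a module place DEFINE_SRCU() or DEFINE_STATIC_SRCU() at file scope.
 * srcu_module_coming() then allocates the per-CPU srcu_data when the module
 * loads, and srcu_module_going() releases it on unload, so the module
 * itself calls neither init_srcu_struct() nor cleanup_srcu_struct().  The
 * module below (mod_example_srcu and its init/exit functions) is
 * hypothetical.
 */
DEFINE_STATIC_SRCU(mod_example_srcu);

static int __init mod_example_init(void)
{
	int idx;

	/* Usable immediately: the COMING notifier set up the per-CPU data. */
	idx = srcu_read_lock(&mod_example_srcu);
	srcu_read_unlock(&mod_example_srcu, idx);
	return 0;
}

static void __exit mod_example_exit(void)
{
	/* Per-CPU data is freed by srcu_module_going(), not here. */
}

module_init(mod_example_init);
module_exit(mod_example_exit);
MODULE_LICENSE("GPL");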