/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	(RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	(RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	(RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	(RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
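
/*
 * Worked example (illustrative only, not used by the build): with
 * RCU_FANOUT_LEAF = 16 and RCU_FANOUT = 64, the per-level capacities are
 * RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 1024, and RCU_FANOUT_3 = 65536.  A
 * NR_CPUS = 4096 kernel therefore selects the three-level branch above:
 * one root rcu_node, DIV_ROUND_UP(4096, 1024) = 4 interior rcu_nodes,
 * and DIV_ROUND_UP(4096, 16) = 256 leaves, so NUM_RCU_NODES = 261.
 */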

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
	unsigned long rcu_qs_ctr;   /* Light universal quiescent state ctr. */
	bool rcu_urgent_qs;	    /* GP old, need light quiescent state. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
				    /* irq/process nesting level from idle. */
	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
				    /*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
				    /* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;		    /* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
				    /* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
				    /* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
				    /* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
				    /* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
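
/*
 * Illustrative sketch (assumes the rcu_dynticks_snap() declaration that
 * appears later in this file): the even/odd convention on ->dynticks
 * lets the grace-period kthread detect quiescent states without waking
 * the target CPU.  Per the field comments above, an even sample means
 * the CPU was idle, and a changed sample means the CPU has passed
 * through idle since the snapshot:
 *
 *	int snap = rcu_dynticks_snap(rdtp);
 *
 *	if (!(snap & 0x1))
 *		(CPU idle at snapshot time: counts as a quiescent state)
 *	else if (rcu_dynticks_snap(rdtp) != snap)
 *		(counter advanced: CPU passed through a quiescent state)
 */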

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
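
/*
 * For example (purely illustrative arithmetic): on a leaf rcu_node
 * spanning CPUs 16-31, CPU 19 maps to 1UL << (19 - 16) == 0x8 in that
 * node's CPU masks.
 */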

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
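
/*
 * Illustrative sketch (not an actual kernel code path): combining the
 * traversal macros above to visit each online CPU via its leaf node,
 * where do_something_with() stands in for a hypothetical per-CPU
 * operation:
 *
 *	rcu_for_each_leaf_node(rsp, rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			if (rnp->qsmaskinitnext & leaf_node_cpu_bit(rnp, cpu))
 *				do_something_with(cpu);
 *	}
 */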

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
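
/*
 * This layout permits a single aggregate test (sketch; ->cpu_no_qs is
 * the instance embedded in struct rcu_data below):
 *
 *	if (rdp->cpu_no_qs.s)
 *		(some quiescent state is still needed from this CPU)
 *
 * in place of testing .b.norm and .b.exp separately.
 */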

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int		dynticks_snap;	/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};
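
/*
 * Illustrative sketch of the ->nxtlist segmentation described above (not
 * an actual code path; "head" is a hypothetical callback being queued):
 * new callbacks always enter through the RCU_NEXT_TAIL segment,
 *
 *	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 *	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 *
 * and segments whose grace period has elapsed are merged into the
 * RCU_DONE_TAIL segment by adjusting tail pointers only, with no
 * per-callback processing.
 */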

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
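
/*
 * For example (worked arithmetic): HZ = 100 gives 1 + 0 + 0 = 1 jiffy
 * between bouts of quiescent-state forcing, HZ = 300 gives 2, and
 * HZ = 1000 gives 3.
 */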

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
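
/*
 * Typical use (sketch only; the flag names are defined later in this
 * file, and real callers may differ): a kthread waiting for a command
 * flag to appear in ->gp_flags:
 *
 *	rcu_wait(READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT);
 *
 * This sleeps in TASK_INTERRUPTIBLE until the condition evaluates true.
 */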

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
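
/*
 * Example (sketch): iterating over all registered flavors to report the
 * ->name and ->abbr fields defined in struct rcu_state above:
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		pr_info("%s (%c)\n", rsp->name, rsp->abbr);
 */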

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
bool rcu_eqs_special_set(int cpu);

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
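
/*
 * Typical use (sketch): the irqsave/irqrestore pair brackets updates to
 * rcu_node fields while preserving the tree-wide ordering guarantee
 * described above:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	(update rnp->qsmask and friends)
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */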