/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, need heavy quiescent state. */
	unsigned long rcu_qs_ctr;	/* Light universal quiescent state ctr. */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/* "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all of this CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};

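/*
 * Illustrative sketch, not part of the original header: because
 * ->dynticks is even when the CPU is idle and odd otherwise, a sampled
 * snapshot can be tested for an extended quiescent state with a single
 * bit check.  The helper name below is hypothetical.
 */
static inline bool rcu_dynticks_snap_in_eqs(int snap)
{
	return !(snap & 0x1);	/* Even ==> idle, an extended quiescent state. */
}
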
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/* some rcu_state fields as well as */
					/* following. */
	unsigned long gpnum;		/* Current grace period for this node. */
					/* This will either be equal to or one */
					/* behind the root rcu_node's gpnum. */
	unsigned long completed;	/* Last GP completed for this node. */
					/* This will either be equal to or one */
					/* behind the root rcu_node's gpnum. */
	unsigned long qsmask;		/* CPUs or groups that need to switch in */
					/* order for current grace period to proceed. */
					/* In leaf rcu_node, each bit corresponds to */
					/* an rcu_data structure, otherwise, each */
					/* bit corresponds to a child rcu_node */
					/* structure. */
	unsigned long qsmaskinit;
					/* Per-GP initial value for qsmask. */
					/* Initialized from ->qsmaskinitnext at the */
					/* beginning of each grace period. */
	unsigned long qsmaskinitnext;
					/* Online CPUs for next grace period. */
	unsigned long expmask;		/* CPUs or groups that need to check in */
					/* to allow the current expedited GP */
					/* to complete. */
	unsigned long expmaskinit;
					/* Per-GP initial values for expmask. */
					/* Initialized from ->expmaskinitnext at the */
					/* beginning of each expedited GP. */
	unsigned long expmaskinitnext;
					/* Online CPUs for next expedited GP. */
					/* Any CPU that has ever been online will */
					/* have its bit set. */
	unsigned long grpmask;		/* Mask to apply to parent qsmask. */
					/* Only one bit will be set in this mask. */
	int grplo;			/* lowest-numbered CPU or group here. */
	int grphi;			/* highest-numbered CPU or group here. */
	u8 grpnum;			/* CPU/group number for next level up. */
	u8 level;			/* root is at level 0. */
	bool wait_blkd_tasks;		/* Necessary to wait for blocked tasks to */
					/* exit RCU read-side critical sections */
					/* before propagating offline up the */
					/* rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
					/* Tasks blocked in RCU read-side critical */
					/* section.  Tasks are placed at the head */
					/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/* current grace period, or NULL if there */
					/* is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to the first task blocking the */
					/* current expedited grace period, or NULL */
					/* if there is no such task.  If there */
					/* is no current expedited grace period, */
					/* then there cannot be any such task. */
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/* priority boosted, or NULL if no priority */
					/* boosting is needed for this rcu_node */
					/* structure.  If there are no tasks */
					/* queued on this rcu_node structure that */
					/* are blocking the current grace period, */
					/* there can be no such task. */
	struct rt_mutex boost_mtx;
					/* Used only for the priority-boosting */
					/* side effect, not as a lock. */
	unsigned long boost_time;
					/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
					/* kthread that takes care of priority */
					/* boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
					/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
					/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
					/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
					/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
					/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
					/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
					/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
					/* Refused to boost: RCU read-side critical */
					/* section still running. */
	unsigned long n_balk_notyet;
					/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
					/* Refused to boost: not sure why, though. */
					/* This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
					/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
					/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))

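/*
 * Usage sketch, not part of the original header: racily sampling
 * whether a given CPU's quiescent state is still needed in its leaf
 * rcu_node.  The helper name is hypothetical; "rnp" must be the CPU's
 * leaf node, so that the CPU lies in [grplo, grphi].
 */
static inline bool leaf_node_cpu_pending(struct rcu_node *rnp, int cpu)
{
	return !!(READ_ONCE(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu));
}
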
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};

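/*
 * Illustrative sketch, not part of the original header: because the
 * u16 ->s overlays both u8 fields, a single load of ->s tests whether
 * either the normal or the expedited grace period still needs a
 * quiescent state.  The helper name is hypothetical.
 */
static inline bool rcu_noqs_pending(union rcu_noqs rnq)
{
	return rnq.s != 0;	/* Nonzero iff ->b.norm or ->b.exp is set. */
}
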
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

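/*
 * These indices name the four callback segments, oldest first: DONE
 * callbacks have had their grace period and are ready to invoke, WAIT
 * callbacks are waiting on the current grace period, NEXT_READY
 * callbacks are waiting on the next grace period, and NEXT callbacks
 * have not yet been assigned a grace period.  As grace periods
 * complete, callbacks advance toward the DONE segment.
 */
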
/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling: */
	unsigned long completed;	/* Track rsp->completed gp number */
					/* in order to detect GP end. */
	unsigned long gpnum;		/* Highest gp number that this CPU */
					/* is aware of having started. */
	unsigned long rcu_qs_ctr_snap;	/* Snapshot of rcu_qs_ctr to check */
					/* for rcu_all_qs() invocations. */
	union rcu_noqs cpu_no_qs;	/* No QSes yet for this CPU. */
	bool core_needs_qs;		/* Core waits for quiescent state. */
	bool beenonline;		/* CPU online at least once. */
	bool gpwrap;			/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy. */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long ticks_this_gp;	/* The number of scheduling-clock */
					/* ticks this CPU has handled */
					/* during and after the last grace */
					/* period it is aware of. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/* different callbacks waiting for */
					/* different grace periods. */
	long qlen_last_fqs_check;
					/* qlen at last check for QS forcing. */
	unsigned long n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long n_nocbs_invoked;	/* count of no-CBs RCU cbs invoked. */
	unsigned long n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU. */
	unsigned long n_cbs_adopted;	/* RCU cbs adopted from dying CPU. */
	unsigned long n_force_qs_snap;
					/* did other CPU force QS recently? */
	long blimit;			/* Upper limit on a processed batch. */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/* from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};

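/*
 * Illustrative note, not part of the original header: the no-CBs
 * fields above form a leader/follower wakeup chain of rcu_data
 * structures, roughly:
 *
 *	leader --nocb_next_follower--> follower A --...--> NULL
 *
 * with each follower's ->nocb_leader pointing back at the leader,
 * which takes the grace-period-end wakeups for the whole group.
 */
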
/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/* jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/* delay between bouts of */
					/* quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/* at least one scheduling clock */
					/* irq before ratting on them. */

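/*
 * The boolean expressions above evaluate to 0 or 1, so
 * RCU_JIFFIES_TILL_FORCE_QS is 1 jiffy for HZ <= 250, 2 jiffies for
 * 250 < HZ <= 500, and 3 jiffies for HZ > 500 (for example, HZ=1000).
 */
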
#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

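/*
 * Usage sketch, not part of the original header: a kthread loop that
 * parks until a wakeup condition holds.  "example_work_pending" and
 * the function name are hypothetical stand-ins, and <linux/kthread.h>
 * is assumed for kthread_should_stop().  Note that rcu_wait() has no
 * waitqueue, so the waker must set the condition and then call
 * wake_up_process() on this kthread.
 */
static bool example_work_pending;	/* Hypothetical condition flag. */

static int example_kthread_fn(void *unused)
{
	while (!kthread_should_stop()) {
		rcu_wait(READ_ONCE(example_work_pending) ||
			 kthread_should_stop());
		WRITE_ONCE(example_work_pending, false);
		/* ... process the work here ... */
	}
	return 0;
}
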
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
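/*
 * Worked example (illustrative numbers, not from the original header):
 * with CONFIG_RCU_FANOUT=16, CONFIG_RCU_FANOUT_LEAF=16, and 256 CPUs,
 * there are two levels: ->node[0] is the root and ->node[1] through
 * ->node[16] are the 16 leaves, each covering 16 CPUs; ->level[0]
 * points at ->node[0] and ->level[1] points at ->node[1].
 */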
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/* shut bogus gcc warning). */
	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8 boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_cblist orphan_pend;		/* Orphaned callbacks that */
						/* need a grace period. */
	struct rcu_cblist orphan_done;		/* Orphaned callbacks that */
						/* are ready to invoke. */
						/* (Contains counts.) */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/* _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/* force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/* kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/* force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/* due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/* due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/* but in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/* activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/* for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/* a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/* GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/* jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS	 1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS	 2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS	 3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

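/*
 * Usage sketch, not part of the original header: iterating over all
 * registered RCU flavors.  The function name is hypothetical.
 */
static inline void example_print_flavor_names(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		pr_info("RCU flavor: %s\n", rsp->name);
}
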
/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
bool rcu_eqs_special_set(int cpu);

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(bool preempt);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#ifdef CONFIG_SRCU
void srcu_online_cpu(unsigned int cpu);
void srcu_offline_cpu(unsigned int cpu);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_online_cpu(unsigned int cpu) { }
static inline void srcu_offline_cpu(unsigned int cpu) { }
#endif /* #else #ifdef CONFIG_SRCU */

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
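
/*
 * Usage sketch, not part of the original header: the canonical
 * acquire/release pairing for an rcu_node lock.  The function name is
 * hypothetical.
 */
static inline void example_update_under_rnp_lock(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* ... read or update fields guarded by rnp->lock ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}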