rcu: Invert passed_quiesce and rename to cpu_no_qs
kernel/rcu/tree.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
#  define RCU_EXP_SCHED_NAME_INIT \
			      { "rcu_node_exp_sched_0" }
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
#  define RCU_EXP_SCHED_NAME_INIT \
			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1" }
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
#  define RCU_EXP_SCHED_NAME_INIT \
			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2" }
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
#  define RCU_EXP_SCHED_NAME_INIT \
			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2", "rcu_node_exp_sched_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

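/*
 * Editorial sketch (not part of the original header): a stand-alone
 * user-space program showing how the macros above size the hierarchy.
 * The nr_cpus/fanout values are made-up examples, and EX_DIV_ROUND_UP
 * is a local re-definition matching the kernel's DIV_ROUND_UP.
 */
#include <stdio.h>

#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpus = 96;			/* example NR_CPUS */
	int fanout = 16;			/* example CONFIG_RCU_FANOUT */
	int fanout_leaf = 16;			/* example CONFIG_RCU_FANOUT_LEAF */
	int fanout_1 = fanout_leaf;		/* RCU_FANOUT_1 */
	int fanout_2 = fanout_1 * fanout;	/* RCU_FANOUT_2 */

	if (nr_cpus <= fanout_1) {
		printf("1 level, 1 rcu_node\n");
	} else if (nr_cpus <= fanout_2) {
		int lvl1 = EX_DIV_ROUND_UP(nr_cpus, fanout_1);

		/* 96 CPUs, leaf fanout 16: 1 root + 6 leaves = 7 nodes. */
		printf("2 levels: 1 + %d = %d rcu_nodes\n", lvl1, 1 + lvl1);
	}
	return 0;
}
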
extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/* "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};

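/*
 * Editorial sketch (not in the original header): how the grace-period
 * machinery can sample ->dynticks from another CPU and apply the
 * even-idle/odd-nonidle convention noted above, in the style of
 * dyntick_save_progress_counter().  The function name is made up.
 */
static inline bool example_cpu_in_dyntick_idle(struct rcu_dynticks *rdtp)
{
	/* atomic_add_return(0, ...) reads the counter with full ordering. */
	int snap = atomic_add_return(0, &rdtp->dynticks);

	return (snap & 0x1) == 0;	/* Even value => dyntick-idle. */
}
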
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/* rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/* This will either be equal to or one */
				/* behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/* This will either be equal to or one */
				/* behind the root rcu_node's ->completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/* order for current grace period to proceed.*/
				/* In leaf rcu_node, each bit corresponds to */
				/* an rcu_data structure, otherwise, each */
				/* bit corresponds to a child rcu_node */
				/* structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/* Initialized from ->qsmaskinitnext at the */
				/* beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/* to allow the current expedited GP */
				/* to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/* Initialized from ->expmaskinitnext at the */
				/* beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/* Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/* exit RCU read-side critical sections */
				/* before propagating offline up the */
				/* rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/* section.  Tasks are placed at the head */
				/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/* current grace period, or NULL if there */
				/* is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/* current expedited grace period, or NULL */
				/* if there is no such task.  If there */
				/* is no current expedited grace period, */
				/* then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/* priority boosted, or NULL if no priority */
				/* boosting is needed for this rcu_node */
				/* structure.  If there are no tasks */
				/* queued on this rcu_node structure that */
				/* are blocking the current grace period, */
				/* there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/* side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/* boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/* This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	wait_queue_head_t nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

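/*
 * Editorial sketch (not in the original header): a typical kernel-style
 * leaf scan using the macro above.  The function is made up for
 * illustration and is not part of the RCU implementation.
 */
static void example_scan_leaves(struct rcu_state *rsp)
{
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		/* Each leaf covers the CPU range ->grplo..->grphi. */
		pr_info("leaf covers CPUs %d-%d\n", rnp->grplo, rnp->grphi);
	}
}
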
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long completed;	/* Track rsp->completed gp number */
					/* in order to detect GP end. */
	unsigned long gpnum;		/* Highest gp number that this CPU */
					/* is aware of having started. */
	unsigned long rcu_qs_ctr_snap;	/* Snapshot of rcu_qs_ctr to check */
					/* for rcu_all_qs() invocations. */
	bool cpu_no_qs;			/* No QS yet for this CPU. */
	bool core_needs_qs;		/* Core waits for quiescent state. */
	bool beenonline;		/* CPU online at least once. */
	bool gpwrap;			/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long ticks_this_gp;	/* The number of scheduling-clock */
					/* ticks this CPU has handled */
					/* during and after the last grace */
					/* period it is aware of. */
	struct cpu_stop_work exp_stop_work;
					/* Expedited grace-period control */
					/* for CPU stopping. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.  (A stand-alone sketch of this
	 * convention follows this structure's definition.)
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries whose batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries whose batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long qlen_lazy;			/* # of lazy queued callbacks */
	long qlen;			/* # of queued callbacks, incl lazy */
	long qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long n_nocbs_invoked;	/* count of no-CBs RCU cbs invoked. */
	unsigned long n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long n_force_qs_snap;
					/* did other CPU force QS recently? */
	long blimit;			/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/* from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	struct mutex exp_funnel_mutex;

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};

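/*
 * Editorial sketch (not in the original header): a user-space illustration
 * of the ->nxtlist/->nxttail segmented-list convention described inside
 * struct rcu_data above, reusing the RCU_*_TAIL indices defined earlier.
 * The toy callback type and all names are made up.
 */
#include <stdio.h>

struct ex_cb {
	struct ex_cb *next;
	int id;
};

int main(void)
{
	struct ex_cb cb = { NULL, 42 };
	struct ex_cb *list = NULL;		/* stands in for ->nxtlist */
	struct ex_cb **tail[RCU_NEXT_SIZE];	/* stands in for ->nxttail[] */
	int i;

	/* Empty list: every tail element points at the head pointer. */
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		tail[i] = &list;

	/* Enqueue one callback at the end, i.e. into the RCU_NEXT segment. */
	*tail[RCU_NEXT_TAIL] = &cb;
	tail[RCU_NEXT_TAIL] = &cb.next;

	/* A segment is empty exactly when its two bounding tails coincide. */
	printf("RCU_WAIT segment empty? %s\n",
	       tail[RCU_WAIT_TAIL] == tail[RCU_DONE_TAIL] ? "yes" : "no");
	printf("first queued callback id: %d\n", list->id);
	return 0;
}
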
/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/* jiffies_till_next_fqs. */
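					/* Worked example (editorial note): */
					/* at HZ=1000 this is 1+1+1 = 3 */
					/* jiffies; at HZ=100 it is 1. */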

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/* delay between bouts of */
					/* quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/* at least one scheduling clock */
					/* irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
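
/*
 * Editorial sketch (not in the original header): typical kernel-style use
 * of rcu_wait() by a per-flavor kthread.  The function name and the exact
 * wakeup condition are made up for illustration.
 */
static int example_kthread(void *arg)
{
	struct rcu_state *rsp = arg;

	for (;;) {
		/* Sleep until the grace-period machinery posts work. */
		rcu_wait(READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS);
		/* ... process the force-quiescent-state request ... */
	}
	return 0;
}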

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/* shut bogus gcc warning) */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
		     void (*func)(struct rcu_head *head));
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8 fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8 boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	wait_queue_head_t gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/* need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/* are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/* _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_long_t expedited_workdone0;	/* # done by others #0. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_workdone3;	/* # done by others #3. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	wait_queue_head_t expedited_wq;		/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/* force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/* force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/* due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/* due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/* in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/* activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/* for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/* a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/* GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/* jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

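/*
 * Editorial sketch (not in the original header): a stand-alone user-space
 * model of the dense "heap" layout described above struct rcu_state, with
 * ->level[] pointing at the first node of each level.  The three-level
 * geometry (1 root, 2 interior, 4 leaves) is made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	int node[7];			/* stands in for ->node[] */
	int *level[3];			/* stands in for ->level[] */
	int nodes_per_level[3] = { 1, 2, 4 };
	int i, off = 0;

	/* Fill level[] much as rcu_init_one() does for ->level[]. */
	for (i = 0; i < 3; i++) {
		level[i] = &node[off];
		off += nodes_per_level[i];
	}

	/* A leaf scan then runs from level[2] to the end of node[]. */
	for (i = (int)(level[2] - node); i < 7; i++)
		printf("node[%d] is a leaf\n", i);
	return 0;
}
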
/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_WAIT_INIT 0	/* Initial state. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

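/*
 * Editorial sketch (not in the original header): kernel-style use of
 * for_each_rcu_flavor().  The function name is made up; the gpnum vs.
 * completed comparison is the standard "GP in progress" test.
 */
static bool example_any_gp_in_progress(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		if (READ_ONCE(rsp->gpnum) != READ_ONCE(rsp->completed))
			return true;	/* This flavor has a GP in flight. */
	}
	return false;
}
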
/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */
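
/*
 * Editorial sketch (not in the original header): typical kernel-style
 * placement of smp_mb__after_unlock_lock() immediately after acquiring
 * an rcu_node lock, so that the previous critical section's UNLOCK plus
 * this LOCK act as a full memory barrier.  The function name is made up.
 */
static void example_lock_user(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();	/* UNLOCK+LOCK is a full barrier. */
	/* ... accesses here are ordered after the prior critical section ... */
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}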