[linux-2.6-block.git] / kernel / rcutree_plugin.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

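/*
 * Illustrative sketch (not code from this file): a typical reader-side
 * critical section built on the primitives above.  The pointer "gp",
 * "struct foo", and do_something_with() are hypothetical names:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * If the reader is preempted between rcu_read_lock() and
 * rcu_read_unlock(), rcu_preempt_note_context_switch() above queues the
 * task on its rcu_node's ->blkd_tasks list, holding up the grace period
 * until the task exits its outermost critical section.
 */
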
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

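/*
 * Illustrative walk-through (informal, not code from this file) of the
 * INT_MIN dance in __rcu_read_unlock() for a task leaving its outermost
 * critical section:
 *
 *	nesting == 1		take the else-branch above
 *	nesting = INT_MIN	mark "unlock in progress"; a context
 *				switch here sees nesting < 0 and lets
 *				rcu_read_unlock_special() finish the job
 *	nesting = 0		unlock complete
 *
 * The negative sentinel is what lets rcu_preempt_note_context_switch()
 * distinguish "preempted mid-unlock" from "preempted mid-read".
 */
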
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock);	/* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock);	/* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

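/*
 * Illustrative usage sketch (hypothetical caller code, not part of this
 * file): deferred freeing of a structure once a grace period has
 * elapsed.  The names "struct foo" and foo_reclaim() are assumptions:
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int a;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu(&p->rcu, foo_reclaim);
 *
 * foo_reclaim() is invoked only after every reader that might still
 * hold a reference to *p has left its RCU read-side critical section.
 */
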
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

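/*
 * Illustrative updater sketch (hypothetical caller code): replacing an
 * RCU-protected pointer and waiting synchronously before freeing the
 * old value.  "gp", "gp_lock", and "struct foo" are assumptions, not
 * names from this file:
 *
 *	struct foo *old;
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu();
 *	kfree(old);
 *
 * After synchronize_rcu() returns, no reader can still be referencing
 * "old", so it is safe to free.
 */
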
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

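/*
 * Worked example of the mutex_trylock() backoff above (the CPU count is
 * an assumption for illustration): with four CPUs online, successive
 * failed attempts wait udelay(1 * 4), udelay(2 * 4), and so on; after
 * the tenth failed attempt the code gives up on expediting and falls
 * back to plain synchronize_rcu().
 */
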
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

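/*
 * Illustrative sketch (hypothetical module code): rcu_barrier() is the
 * usual last step before unloading a module that has posted call_rcu()
 * callbacks, since those callbacks must not run after the module text
 * is gone:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// stop posting new call_rcu() callbacks, then:
 *		rcu_barrier();
 *		// now safe to free module data and unload
 *	}
 */
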
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
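
/*
 * Illustrative timeline (informal, not code) of the proxy-lock trick
 * above.  Assume reader task T at low priority is blocking a grace
 * period:
 *
 *	booster:  rt_mutex_init_proxy_locked(&mtx, T)
 *	booster:  T->rcu_boost_mutex = &mtx
 *	booster:  rt_mutex_lock(&mtx)   -> PI boosts T to booster's prio
 *	T:        finishes critical section, rcu_read_unlock_special()
 *	T:        rt_mutex_unlock(&mtx) -> T drops back to its own prio
 *	booster:  rt_mutex_lock(&mtx) returns; rt_mutex_unlock(&mtx)
 *
 * The only purpose of mtx is to engage the rt_mutex priority-
 * inheritance machinery; it protects no data.
 */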

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

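/*
 * Worked example of the delay arithmetic above (the values are
 * assumptions for illustration, not asserted defaults): with
 * CONFIG_RCU_BOOST_DELAY=500 and HZ=250,
 * RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies,
 * so boosting is considered only once a grace period has been held up
 * for about half a second.
 */
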
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

1419static void rcu_kthread_do_work(void)
1420{
1421 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1422 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1423 rcu_preempt_do_callbacks();
1424}
1425
1426/*
1427 * Wake up the specified per-rcu_node-structure kthread.
1428 * Because the per-rcu_node kthreads are immortal, we don't need
1429 * to do anything to keep them alive.
1430 */
1431static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1432{
1433 struct task_struct *t;
1434
1435 t = rnp->node_kthread_task;
1436 if (t != NULL)
1437 wake_up_process(t);
1438}
1439
1440/*
1441 * Set the specified CPU's kthread to run RT or not, as specified by
1442 * the to_rt argument. The CPU-hotplug locks are held, so the task
1443 * is not going away.
1444 */
1445static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1446{
1447 int policy;
1448 struct sched_param sp;
1449 struct task_struct *t;
1450
1451 t = per_cpu(rcu_cpu_kthread_task, cpu);
1452 if (t == NULL)
1453 return;
1454 if (to_rt) {
1455 policy = SCHED_FIFO;
1456 sp.sched_priority = RCU_KTHREAD_PRIO;
1457 } else {
1458 policy = SCHED_NORMAL;
1459 sp.sched_priority = 0;
1460 }
1461 sched_setscheduler_nocheck(t, policy, &sp);
1462}
1463
1464/*
1465 * Timer handler to initiate the waking up of per-CPU kthreads that
1466 * have yielded the CPU due to excess numbers of RCU callbacks.
1467 * We wake up the per-rcu_node kthread, which in turn will wake up
1468 * the booster kthread.
1469 */
1470static void rcu_cpu_kthread_timer(unsigned long arg)
1471{
1472 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1473 struct rcu_node *rnp = rdp->mynode;
1474
1475 atomic_or(rdp->grpmask, &rnp->wakemask);
1476 invoke_rcu_node_kthread(rnp);
1477}
1478
1479/*
1480 * Drop to non-real-time priority and yield, but only after posting a
1481 * timer that will cause us to regain our real-time priority if we
1482 * remain preempted. Either way, we restore our real-time priority
1483 * before returning.
1484 */
1485static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1486{
1487 struct sched_param sp;
1488 struct timer_list yield_timer;
5b61b0ba 1489 int prio = current->rt_priority;
f8b7fc6b
PM
1490
1491 setup_timer_on_stack(&yield_timer, f, arg);
1492 mod_timer(&yield_timer, jiffies + 2);
1493 sp.sched_priority = 0;
1494 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1495 set_user_nice(current, 19);
1496 schedule();
5b61b0ba
MG
1497 set_user_nice(current, 0);
1498 sp.sched_priority = prio;
f8b7fc6b
PM
1499 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1500 del_timer(&yield_timer);
1501}
1502
1503/*
1504 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1505 * This can happen while the corresponding CPU is either coming online
1506 * or going offline. We cannot wait until the CPU is fully online
1507 * before starting the kthread, because the various notifier functions
1508 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1509 * the corresponding CPU is online.
1510 *
1511 * Return 1 if the kthread needs to stop, 0 otherwise.
1512 *
1513 * Caller must disable bh. This function can momentarily enable it.
1514 */
1515static int rcu_cpu_kthread_should_stop(int cpu)
1516{
1517 while (cpu_is_offline(cpu) ||
1518 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1519 smp_processor_id() != cpu) {
1520 if (kthread_should_stop())
1521 return 1;
1522 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1523 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1524 local_bh_enable();
1525 schedule_timeout_uninterruptible(1);
1526 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1527 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1528 local_bh_disable();
1529 }
1530 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1531 return 0;
1532}
1533
1534/*
1535 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
e0f23060
PM
1536 * RCU softirq used in flavors and configurations of RCU that do not
1537 * support RCU priority boosting.
f8b7fc6b
PM
1538 */
1539static int rcu_cpu_kthread(void *arg)
1540{
1541 int cpu = (int)(long)arg;
1542 unsigned long flags;
1543 int spincnt = 0;
1544 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1545 char work;
1546 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1547
385680a9 1548 trace_rcu_utilization("Start CPU kthread@init");
f8b7fc6b
PM
1549 for (;;) {
1550 *statusp = RCU_KTHREAD_WAITING;
385680a9 1551 trace_rcu_utilization("End CPU kthread@rcu_wait");
f8b7fc6b 1552 rcu_wait(*workp != 0 || kthread_should_stop());
385680a9 1553 trace_rcu_utilization("Start CPU kthread@rcu_wait");
f8b7fc6b
PM
1554 local_bh_disable();
1555 if (rcu_cpu_kthread_should_stop(cpu)) {
1556 local_bh_enable();
1557 break;
1558 }
1559 *statusp = RCU_KTHREAD_RUNNING;
1560 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1561 local_irq_save(flags);
1562 work = *workp;
1563 *workp = 0;
1564 local_irq_restore(flags);
1565 if (work)
1566 rcu_kthread_do_work();
1567 local_bh_enable();
1568 if (*workp != 0)
1569 spincnt++;
1570 else
1571 spincnt = 0;
1572 if (spincnt > 10) {
1573 *statusp = RCU_KTHREAD_YIELDING;
385680a9 1574 trace_rcu_utilization("End CPU kthread@rcu_yield");
f8b7fc6b 1575 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
385680a9 1576 trace_rcu_utilization("Start CPU kthread@rcu_yield");
f8b7fc6b
PM
1577 spincnt = 0;
1578 }
1579 }
1580 *statusp = RCU_KTHREAD_STOPPED;
385680a9 1581 trace_rcu_utilization("End CPU kthread@term");
f8b7fc6b
PM
1582 return 0;
1583}

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread, because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
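
/*
 * Illustrative sketch, not part of this file: the spawn sequence above
 * (create, bind to a CPU, switch to SCHED_FIFO, then wake) has a rough
 * userspace analogue in POSIX threads.  A minimal standalone version
 * (SCHED_FIFO needs root or CAP_SYS_NICE; CPU 0 is an arbitrary choice)
 * is kept under #if 0 so that it is never compiled into the kernel.
 */
#if 0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	printf("worker running on CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t cpus;

	pthread_create(&t, NULL, worker, NULL);
	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);		/* analogue of kthread_bind() */
	pthread_setaffinity_np(t, sizeof(cpus), &cpus);
	pthread_setschedparam(t, SCHED_FIFO, &sp); /* cf. sched_setscheduler_nocheck() */
	pthread_join(t, NULL);
	return 0;
}
#endif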

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
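
/*
 * Illustrative sketch, not part of this file: the mask construction
 * above (take the node's CPUs, drop the outgoing one, and fall back to
 * "everywhere but this node" if nothing is left) can be mimicked in
 * userspace with the glibc cpu_set_t macros.  The mask and CPU numbers
 * below are made up for the demonstration; kept under #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t cm;
	unsigned long mask = 0xb;	/* pretend CPUs 0, 1, and 3 belong to the node */
	int grplo = 0, grphi = 3, outgoingcpu = 1, cpu;

	CPU_ZERO(&cm);
	for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			CPU_SET(cpu, &cm);
	if (CPU_COUNT(&cm) == 0) {
		/* Nothing left: allow everything outside the node. */
		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
			CPU_SET(cpu, &cm);
		for (cpu = grplo; cpu <= grphi; cpu++)
			CPU_CLR(cpu, &cm);
	}
	for (cpu = grplo; cpu <= grphi; cpu++)
		printf("CPU %d: %s\n", cpu,
		       CPU_ISSET(cpu, &cm) ? "allowed" : "excluded");
	return 0;
}
#endif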

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}
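
/*
 * Illustrative note, not part of this file: smp_mb() above is a full
 * memory barrier.  The closest portable userspace spelling is a C11
 * sequentially consistent fence, shown under #if 0 purely for
 * comparison.
 */
#if 0
#include <stdatomic.h>

static void full_barrier(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* rough analogue of smp_mb() */
}
#endif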

/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We retry
		 * after they started, so our grace period works for them,
		 * and they started after our first try, so their grace
		 * period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
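
/*
 * Illustrative sketch, not part of this file: the started/done pair
 * above works like the two halves of a ticket lock.  A stripped-down
 * standalone rendition of the snapshot-and-advance logic using C11
 * atomics (force_gp() is a hypothetical stand-in for a successful
 * try_stop_cpus() pass) is kept under #if 0.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int started;
static atomic_int done;

static bool force_gp(void)		/* pretend the big hammer worked */
{
	return true;
}

static void expedite(void)
{
	int snap = atomic_fetch_add(&started, 1) + 1;	/* take a ticket */
	int s;

	while (!force_gp()) {
		s = atomic_load(&done);
		if (s - snap >= 0)	/* someone else did our work */
			return;
	}
	do {
		s = atomic_load(&done);
		if (s - snap >= 0)	/* someone newer already advanced done */
			break;
	} while (!atomic_compare_exchange_strong(&done, &s, snap));
}

int main(void)
{
	expedite();
	printf("started=%d done=%d\n",
	       atomic_load(&started), atomic_load(&done));
	return 0;
}
#endif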

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) there are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}

/*
 * Timer handler used to force the CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}
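
/*
 * Illustrative arithmetic, not part of this file: with HZ=1000 (an
 * assumption for the demonstration), the six-jiffy RCU_IDLE_GP_DELAY
 * above becomes 6000us, so rcu_idle_gp_wait ends up holding a
 * 6000000ns ktime.  A standalone check of that conversion is kept
 * under #if 0.
 */
#if 0
#include <stdio.h>

#define DEMO_HZ 1000			/* assumed scheduling-clock rate */

int main(void)
{
	unsigned int upj = 6 * (1000000 / DEMO_HZ);	/* jiffies_to_usecs(6) */
	unsigned long long wait_ns = upj * 1000ULL;	/* what ns_to_ktime() receives */

	printf("6 jiffies = %u us = %llu ns\n", upj, wait_ns);
	return 0;
}
#endif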

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-CPU rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
			      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		local_irq_restore(flags);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}
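
/*
 * Illustrative sketch, not part of this file: rcu_prepare_for_idle()
 * sequences each idle attempt through the per-CPU rcu_dyntick_drain
 * counter: arm it on first contact, take the dyntick-with-callbacks
 * exit once only optional passes remain and nothing is urgent, and
 * enter holdoff when the passes run out.  A standalone skeleton of
 * just that sequencing (work_is_urgent() is a hypothetical stand-in
 * for rcu_pending()) is kept under #if 0.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define IDLE_FLUSHES	 5	/* cf. RCU_IDLE_FLUSHES */
#define IDLE_OPT_FLUSHES 3	/* cf. RCU_IDLE_OPT_FLUSHES */

static int drain;		/* cf. the per-CPU rcu_dyntick_drain */

static bool work_is_urgent(void)
{
	return false;		/* pretend RCU needs nothing immediately */
}

static const char *idle_step(void)
{
	if (drain <= 0) {
		drain = IDLE_FLUSHES;	/* first time through: arm the counter */
	} else if (drain <= IDLE_OPT_FLUSHES && !work_is_urgent()) {
		drain = 0;		/* optional pass, nothing urgent */
		return "dyntick with callbacks";
	} else if (--drain <= 0) {
		return "begin holdoff";	/* out of passes: give up for now */
	}
	return "push callbacks";	/* do one step of draining */
}

int main(void)
{
	int pass;

	for (pass = 0; pass < 4; pass++)
		printf("pass %d: %s\n", pass, idle_step());
	return 0;
}
#endif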

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */