kernel/rcu/update.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * report an extended quiescent state to other CPUs that have started a
 * grace period.  Otherwise we would delay any grace period for as long as
 * we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

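/*
 * Example (illustrative sketch, hypothetical names): a subsystem could use
 * rcu_read_lock_sched_held() in a lockdep-style debug check guarding an
 * RCU-sched protected lookup.  Here foo_table and foo_lookup() stand in
 * for subsystem-specific code:
 *
 *	struct foo *foo_lookup(int idx)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "foo_lookup() needs rcu_read_lock_sched()");
 *		return rcu_dereference_sched(foo_table[idx]);
 *	}
 */
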
#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the window of boot from when the
 * first task is spawned until the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

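/*
 * Example (illustrative sketch, hypothetical helper): a latency-sensitive
 * region, such as parts of boot or suspend, could be bracketed so that
 * any synchronize_rcu() calls issued within it are expedited:
 *
 *	rcu_expedite_gp();
 *	do_latency_sensitive_work();	// synchronize_rcu() here is expedited
 *	rcu_unexpedite_gp();
 */
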
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

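/*
 * Example (illustrative sketch, hypothetical names): the reader pattern
 * that ends up in __rcu_read_lock()/__rcu_read_unlock() on PREEMPT_RCU
 * kernels.  gbl_foo is an RCU-protected pointer published elsewhere:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */
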
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

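/*
 * Example (illustrative sketch, hypothetical names): rcu_read_lock_held()
 * is usually consumed via rcu_dereference_check(), allowing a pointer to
 * be fetched either under rcu_read_lock() or while holding the update-side
 * lock foo_lock:
 *
 *	p = rcu_dereference_check(gbl_foo, lockdep_is_held(&foo_lock));
 */
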
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

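/*
 * Example (illustrative sketch): __wait_rcu_gp() is normally reached
 * through wrappers such as wait_rcu_gp() and synchronize_rcu_mult(),
 * which supply the on-stack rcu_synchronize array.  Waiting for both an
 * RCU and an RCU-sched grace period in a single call might look like:
 *
 *	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 */
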
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

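/*
 * Example (illustrative sketch): pairing init_rcu_head_on_stack() with
 * destroy_rcu_head_on_stack() around an on-stack rcu_head, as
 * __wait_rcu_gp() does above:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */
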
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
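
/*
 * Example (illustrative sketch, assuming HZ=1000 and the default
 * CONFIG_RCU_CPU_STALL_TIMEOUT of 21): with CONFIG_PROVE_RCU=y,
 * rcu_jiffies_till_stall_check() returns 21 * 1000 + 5 * 1000 = 26000
 * jiffies, so stall warnings fire after roughly 26 seconds without a
 * quiescent state.
 */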

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

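/*
 * Example (illustrative sketch, hypothetical names): a tracer might free
 * an old trampoline only after every task has passed through a voluntary
 * context switch, idle, or usermode execution:
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct tramp *t = container_of(rhp, struct tramp, rcu);
 *
 *		free_trampoline(t);
 *	}
 *
 *	call_rcu_tasks(&old_tramp->rcu, tramp_free_cb);
 */
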
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

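/*
 * Example (illustrative sketch, hypothetical helpers): synchronously
 * waiting before reusing a function preamble, as a tracer might do:
 *
 *	unregister_my_probe();
 *	synchronize_rcu_tasks();	// all tasks are now past the old preamble
 *	reuse_probe_memory();
 */
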
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  The sysadmin can move it if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */