// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

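/*
 * Editorial note (illustrative, not part of the original file): because of
 * the MODULE_PARAM_PREFIX above, the module_param() declarations in this
 * file are exposed with an "rcupdate." prefix.  For example, a boot command
 * line such as the following forces normal (non-expedited) grace periods
 * and suppresses RCU CPU stall warnings:
 *
 *	rcupdate.rcu_normal=1 rcupdate.rcu_cpu_stall_suppress=1
 */
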
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (that is,
 * in the section between ct_idle_enter() and ct_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as being in an extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required: we need to keep an RCU-free window in idle where the
 * CPU may possibly enter a low-power mode.  This way, CPUs that have started
 * a grace period can notice our extended quiescent state.  Otherwise we
 * would delay every grace period for as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

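/*
 * Illustrative sketch (editorial addition, not kernel code): because any
 * preemption-disabled region counts as an RCU-sched read-side critical
 * section, a reader like the one below satisfies the lockdep check behind
 * rcu_dereference_sched().  The struct foo and global_foo names are
 * hypothetical, and global_foo is assumed to be non-NULL here.
 *
 *	struct foo { int val; };
 *	struct foo __rcu *global_foo;
 *
 *	static int read_foo_val(void)
 *	{
 *		int val;
 *
 *		preempt_disable();	// implicit RCU-sched reader
 *		val = rcu_dereference_sched(global_foo)->val;
 *		preempt_enable();
 *		return val;
 *	}
 */
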
#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the portion of boot between when the
 * first task is spawned and when the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_async_hurry_nesting = ATOMIC_INIT(1);
/*
 * Should call_rcu() callbacks be processed with urgency or are
 * they OK being executed with arbitrary delays?
 */
bool rcu_async_should_hurry(void)
{
	return !IS_ENABLED(CONFIG_RCU_LAZY) ||
	       atomic_read(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_should_hurry);

/**
 * rcu_async_hurry - Make future async RCU callbacks not lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a timely fashion.
 */
void rcu_async_hurry(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_inc(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_hurry);

/**
 * rcu_async_relax - Make future async RCU callbacks lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a lazy fashion.
 */
void rcu_async_relax(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_dec(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_relax);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
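
/*
 * Illustrative sketch (editorial addition, not kernel code): a caller that
 * needs faster grace periods for a bounded phase, much as RCU itself does
 * during boot, can bracket that phase with rcu_expedite_gp() and
 * rcu_unexpedite_gp().  The function name and the foo helpers are
 * hypothetical.
 *
 *	static void reconfigure_foo(void)
 *	{
 *		rcu_expedite_gp();
 *		unpublish_foo_entries();	// hypothetical update step
 *		synchronize_rcu();		// expedited here, unless rcu_normal is set
 *		free_foo_entries();		// hypothetical reclamation step
 *		rcu_unexpedite_gp();
 *	}
 */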

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	rcu_async_relax();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	pr_info("Running RCU synchronous self tests\n");
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
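
/*
 * Illustrative sketch (editorial addition, not kernel code): rcu_read_lock_held()
 * is typically consumed via RCU_LOCKDEP_WARN() or rcu_dereference_check() in an
 * accessor that must run either under rcu_read_lock() or with some update-side
 * lock held.  The foo structure, global_foo, and foo_lock are hypothetical.
 *
 *	struct foo { int val; };
 *	struct foo __rcu *global_foo;
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static struct foo *get_foo(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&foo_lock),
 *				 "get_foo() needs rcu_read_lock() or foo_lock");
 *		return rcu_dereference_check(global_foo, lockdep_is_held(&foo_lock));
 *	}
 */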

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion(&rs_array[i].completion);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

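/*
 * Illustrative sketch (editorial addition, not kernel code): for a single
 * call_rcu()-style function, __wait_rcu_gp() above reduces to the following
 * open-coded pattern, which is how a synchronous grace-period wait is built
 * from wakeme_after_rcu().  The function name is hypothetical.
 *
 *	static void wait_for_one_rcu_gp(void)
 *	{
 *		struct rcu_synchronize rs;
 *
 *		init_rcu_head_on_stack(&rs.head);
 *		init_completion(&rs.completion);
 *		call_rcu(&rs.head, wakeme_after_rcu);
 *		wait_for_completion(&rs.completion);
 *		destroy_rcu_head_on_stack(&rs.head);
 *	}
 */
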
void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be cleaned up
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
int rcu_cpu_stall_cputime __read_mostly = IS_ENABLED(CONFIG_RCU_CPU_STALL_CPUTIME);
module_param(rcu_cpu_stall_cputime, int, 0644);
bool rcu_exp_stall_task_details __read_mostly;
module_param(rcu_exp_stall_task_details, bool, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
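
/*
 * Editorial note (illustrative, not part of the original file): the stall
 * parameters above are writable at run time under
 * /sys/module/rcupdate/parameters/ and settable at boot with the same
 * "rcupdate." prefix, for example:
 *
 *	rcupdate.rcu_cpu_stall_timeout=300
 *	echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
 */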

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

/**
 * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
 *
 * Returns a value that will always be treated by functions like
 * poll_state_synchronize_rcu() as a cookie whose grace period has already
 * completed.
 */
unsigned long get_completed_synchronize_rcu(void)
{
	return RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);

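/*
 * Illustrative sketch (editorial addition, not kernel code): a pre-completed
 * cookie lets polled grace-period state start out as already expired, so the
 * very first check need not wait.  The foo structure and functions are
 * hypothetical.
 *
 *	struct foo { unsigned long gp_cookie; };
 *
 *	static void foo_init(struct foo *fp)
 *	{
 *		fp->gp_cookie = get_completed_synchronize_rcu();
 *	}
 *
 *	static void foo_start_update(struct foo *fp)
 *	{
 *		fp->gp_cookie = get_state_synchronize_rcu();
 *	}
 *
 *	static bool foo_grace_period_done(struct foo *fp)
 *	{
 *		return poll_state_synchronize_rcu(fp->gp_cookie);
 *	}
 */
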
#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	int idx;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	idx = srcu_down_read(&early_srcu);
	srcu_up_read(&early_srcu, idx);
	call_rcu(&head, test_callback);
	early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
	call_srcu(&early_srcu, &shead, test_callback);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		early_boot_test_counter++;
		srcu_barrier(&early_srcu);
		WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		cleanup_srcu_struct(&early_srcu);
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */
671#endif /* #ifndef CONFIG_TINY_RCU */