rcu-tasks: Prevent complaints of unused show_rcu_tasks_classic_gp_kthread()
[linux-block.git] / kernel / rcu / tasks.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing a new callback to get the kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}
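
/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * hypothetical new flavor would be wired up via DEFINE_RCU_TASKS().
 * The names rcu_tasks_toy, rcu_tasks_toy_wait_gp, and call_rcu_tasks_toy
 * are invented for this example.
 *
 *	static void rcu_tasks_toy_wait_gp(struct rcu_tasks *rtp);
 *	void call_rcu_tasks_toy(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks_toy, rcu_tasks_toy_wait_gp,
 *			 call_rcu_tasks_toy, "RCU Tasks Toy");
 *
 *	void call_rcu_tasks_toy(struct rcu_head *rhp, rcu_callback_t func)
 *	{
 *		call_rcu_tasks_generic(rhp, func, &rcu_tasks_toy);
 *	}
 */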

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (HZ / 2)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
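
/*
 * Note: because this file is built into the rcupdate translation unit,
 * these parameters are expected to appear under the "rcupdate." prefix,
 * for example rcupdate.rcu_task_stall_timeout=<jiffies> on the kernel
 * command line or via /sys/module/rcupdate/parameters/.
 */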

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(HZ/10);

		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
	}
}

/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
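
/*
 * For example (hypothetical values, derived from the format string above),
 * an idle classic flavor with a spawned kthread and an empty callback
 * list might print:
 *
 *	rcu_tasks: RTGS_WAIT_CBS(11) since 3 g:42 i:0/7 k.
 */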
#endif /* #ifndef CONFIG_TINY_RCU */

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
	fract = 10;

	for (;;) {
		bool firstreport;
		bool needreport;
		int rtst;

		if (list_empty(&holdouts))
			break;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(HZ/fract);

		if (fract > 1)
			fract--;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.

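// Conceptual usage sketch, not part of this file: an updater first
// removes all ways for tasks to enter a trampoline, waits for a
// Tasks-RCU grace period, and only then frees it.  The names
// unpatch_all_call_sites() and tramp are hypothetical.
//
//	unpatch_all_call_sites(tramp);	// No new entries into trampoline.
//	synchronize_rcu_tasks();	// Every task has voluntarily switched.
//	kfree(tramp);			// No task can still be executing it.
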
/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
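
/*
 * Illustrative only, not part of this file: an asynchronous user of
 * call_rcu_tasks() embeds a struct rcu_head in the object to be freed.
 * The struct my_tramp type and its fields are hypothetical names.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	// After unpatching all call sites:
 *	call_rcu_tasks(&tramp->rh, my_tramp_free_cb);
 */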

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
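
/*
 * Illustrative only: code that queued callbacks with call_rcu_tasks()
 * typically makes sure none remain in flight before tearing down, for
 * example on module unload.  my_driver_exit() and
 * stop_queueing_tasks_callbacks() are hypothetical names.
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		stop_queueing_tasks_callbacks();  // Hypothetical helper.
 *		rcu_barrier_tasks();  // Wait for queued callbacks to finish.
 *	}
 */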

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This sends IPIs far and wide and induces otherwise unnecessary context
// switches on all online CPUs, whether idle or not.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}
core_initcall(rcu_spawn_tasks_rude_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
static void show_rcu_tasks_rude_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.

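// Illustrative only: a reader-side sketch using the explicit markers
// declared in include/linux/rcupdate_trace.h.  The do_tracing_hook()
// helper is a hypothetical name.
//
//	rcu_read_lock_trace();
//	do_tracing_hook();	// Protected against rcu_tasks_trace GPs.
//	rcu_read_unlock_trace();
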
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
unsigned long n_heavy_reader_attempts;
unsigned long n_heavy_reader_updates;
unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
	int nq = t->trc_reader_special.b.need_qs;

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, nesting);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!t->trc_reader_nesting)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task.  */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	bool in_qs = false;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return false;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return false; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		in_qs = true;
	} else {
		in_qs = likely(!t->trc_reader_nesting);
	}

	// Mark as checked.  Because this is called from the grace-period
	// kthread, also remove the task from the holdout list.
	t->trc_reader_checked = true;
	trc_del_holdout(t);

	if (in_qs)
		return true;  // Already in quiescent state, done!!!

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		trc_del_holdout(t);
		WARN_ON_ONCE(t->trc_reader_nesting);
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1; // Allow future checks of this task.
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
		 t->trc_reader_nesting,
		 " N"[!!t->trc_reader_special.b.need_qs],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
			if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
				show_stalled_task_trace(t, &firstreport);
				trc_del_holdout(t);
			}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(t->trc_reader_nesting);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_trace()
 * assumes that the read-side critical sections are delimited by calls
 * to rcu_read_lock_trace() and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

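/*
 * Illustrative only: an updater frees a hook structure once all
 * rcu_read_lock_trace() readers that might reference it are done.
 * struct my_hook, hook_slot, and my_hook_free_cb() are hypothetical
 * names.
 *
 *	static void my_hook_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_hook, rh));
 *	}
 *
 *	rcu_assign_pointer(hook_slot, NULL);	// Unpublish the hook.
 *	call_rcu_tasks_trace(&hook->rh, my_hook_free_cb);
 */
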
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * rcu-tasks read-side critical sections have completed.  These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}
core_initcall(rcu_spawn_tasks_trace_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */