rcu-tasks: Don't remove tasks with pending IPIs from holdout list
kernel/rcu/tasks.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}
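
/*
 * For example, the classic flavor below is created via
 * DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"),
 * which wires in rcu_tasks_wait_gp() as the grace-period-wait function and
 * call_rcu_tasks() as the call_rcu()-equivalent function.
 */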

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
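
/*
 * Note: because this file is included from kernel/rcu/update.c, which
 * sets MODULE_PARAM_PREFIX to "rcupdate.", both parameters above can be
 * set on the kernel command line, for example (illustrative value,
 * equal to the default ten minutes when HZ=1000):
 *
 *	rcupdate.rcu_task_stall_timeout=600000
 */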

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
#define RTGS_WAIT_WAIT_CBS 1
#define RTGS_WAIT_GP 2
#define RTGS_PRE_WAIT_GP 3
#define RTGS_SCAN_TASKLIST 4
#define RTGS_POST_SCAN_TASKLIST 5
#define RTGS_WAIT_SCAN_HOLDOUTS 6
#define RTGS_SCAN_HOLDOUTS 7
#define RTGS_POST_GP 8
#define RTGS_WAIT_READERS 9
#define RTGS_INVOKE_CBS 10
#define RTGS_WAIT_CBS 11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and the smp_mb__after_spinlock().  Pre-grace-period
// read-side code is ordered before the grace period via the
// synchronize_rcu() call in rcu_tasks_pregp_step() and by the scheduler's
// locks and interrupt disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
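
/*
 * Usage sketch (illustrative only, not part of this file): freeing a
 * dynamically allocated trampoline-like object only after every task has
 * passed through a voluntary context switch, so that no task can still be
 * executing within it.  The my_tramp type, its fields, and the unpublish
 * step are assumptions made purely for illustration.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		vfree(tp->text);	// Release the trampoline text.
 *		kfree(tp);		// Then its descriptor.
 *	}
 *
 *	// After unpublishing tp so that no new execution can enter it:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */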

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
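
/*
 * Usage sketch (illustrative only, not part of this file): an updater that
 * has just unpublished a hypothetical old_handler that might still be
 * executing somewhere, possibly with preemption disabled, waits for every
 * online CPU to pass through a context switch before freeing it.  The
 * active_handler pointer and the handler structures are assumptions made
 * purely for illustration.
 *
 *	WRITE_ONCE(active_handler, new_handler);	// No new callers of old_handler.
 *	synchronize_rcu_tasks_rude();			// Wait out any current callers.
 *	kfree(old_handler);
 */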

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
		 READ_ONCE(t->trc_reader_nesting),
		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic(); // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic(); // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
			trc_wait,
			atomic_read(&trc_n_readers_need_end) == 0,
			READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break; // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
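
/*
 * Usage sketch (illustrative only, not part of this file): a reader marks
 * its critical section with rcu_read_lock_trace()/rcu_read_unlock_trace(),
 * and a single updater waits for all such sections before freeing.  The
 * my_hook pointer and do_hook() helper are assumptions made purely for
 * illustration.
 *
 *	// Reader (for example, from a tracing hook):
 *	rcu_read_lock_trace();
 *	h = rcu_dereference_raw(my_hook);
 *	if (h)
 *		do_hook(h);
 *	rcu_read_unlock_trace();
 *
 *	// Updater (single updater, so a plain read of my_hook is OK):
 *	old = my_hook;
 *	rcu_assign_pointer(my_hook, NULL);	// Unpublish the hook.
 *	synchronize_rcu_tasks_trace();		// Wait for all trace readers.
 *	kfree(old);
 */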

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = true;
}

static void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) {		// still hanging.
			pr_err("%s has failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */