/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
        struct rcu_segcblist cblist;
        raw_spinlock_t __private lock;
        unsigned long rtp_jiffies;
        unsigned long rtp_n_lock_retries;
        struct timer_list lazy_timer;
        unsigned int urgent_gp;
        struct work_struct rtp_work;
        struct irq_work rtp_irq_work;
        struct rcu_head barrier_q_head;
        struct list_head rtp_blkd_tasks;
        struct list_head rtp_exit_list;
        int cpu;
        struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
        struct rcuwait cbs_wait;
        raw_spinlock_t cbs_gbl_lock;
        struct mutex tasks_gp_mutex;
        int gp_state;
        int gp_sleep;
        int init_fract;
        unsigned long gp_jiffies;
        unsigned long gp_start;
        unsigned long tasks_gp_seq;
        unsigned long n_ipis;
        unsigned long n_ipis_fails;
        struct task_struct *kthread_ptr;
        unsigned long lazy_jiffies;
        rcu_tasks_gp_func_t gp_func;
        pregp_func_t pregp_func;
        pertask_func_t pertask_func;
        postscan_func_t postscan_func;
        holdouts_func_t holdouts_func;
        postgp_func_t postgp_func;
        call_rcu_func_t call_func;
        struct rcu_tasks_percpu __percpu *rtpcpu;
        int percpu_enqueue_shift;
        int percpu_enqueue_lim;
        int percpu_dequeue_lim;
        unsigned long percpu_dequeue_gpseq;
        struct mutex barrier_q_mutex;
        atomic_t barrier_q_count;
        struct completion barrier_q_completion;
        unsigned long barrier_q_seq;
        char *name;
        char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
	.name = n,									\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}

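/*
 * Usage sketch: each flavor instantiates this shared machinery by expanding
 * DEFINE_RCU_TASKS() with its grace-period-wait function, its call_rcu()
 * analog, and a human-readable name, exactly as done for the flavors later
 * in this file, for example:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * This expands to one static struct rcu_tasks named rcu_tasks plus its
 * per-CPU rcu_tasks_percpu state, initially limited to CPU 0's callback
 * queue until cblist_init_generic() adjusts the limits at boot.
 */
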
#ifdef CONFIG_TASKS_RCU

/* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);

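/*
 * Illustrative sketch (not part of the implementation): because this file is
 * included from kernel/rcu/update.c, these module_param() declarations are
 * normally tuned from the kernel command line with an "rcupdate." prefix,
 * for example (values shown are arbitrary):
 *
 *	rcupdate.rcu_task_stall_timeout=1200	# jiffies until a stall splat
 *	rcupdate.rcu_task_stall_info=250	# jiffies until pre-stall info
 *	rcupdate.rcu_task_enqueue_lim=4		# fixed number of callback queues
 *
 * Setting rcu_task_enqueue_lim to a positive value disables the automatic
 * single-queue/per-CPU-queue switching (rcu_task_cb_adjust stays false).
 */
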
/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
        "RTGS_INIT",
        "RTGS_WAIT_WAIT_CBS",
        "RTGS_WAIT_GP",
        "RTGS_PRE_WAIT_GP",
        "RTGS_SCAN_TASKLIST",
        "RTGS_POST_SCAN_TASKLIST",
        "RTGS_WAIT_SCAN_HOLDOUTS",
        "RTGS_SCAN_HOLDOUTS",
        "RTGS_POST_GP",
        "RTGS_WAIT_READERS",
        "RTGS_INVOKE_CBS",
        "RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
        rtp->gp_state = newstate;
        rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
        int i = data_race(rtp->gp_state); // Let KCSAN detect update races
        int j = READ_ONCE(i); // Prevent the compiler from reading twice

        if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
                return "???";
        return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU. Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
        int cpu;
        int lim;
        int shift;

        if (rcu_task_enqueue_lim < 0) {
                rcu_task_enqueue_lim = 1;
                rcu_task_cb_adjust = true;
        } else if (rcu_task_enqueue_lim == 0) {
                rcu_task_enqueue_lim = 1;
        }
        lim = rcu_task_enqueue_lim;

        if (lim > nr_cpu_ids)
                lim = nr_cpu_ids;
        shift = ilog2(nr_cpu_ids / lim);
        if (((nr_cpu_ids - 1) >> shift) >= lim)
                shift++;
        WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
        WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
        smp_store_release(&rtp->percpu_enqueue_lim, lim);
        for_each_possible_cpu(cpu) {
                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

                WARN_ON_ONCE(!rtpcp);
                if (cpu)
                        raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
                if (rcu_segcblist_empty(&rtpcp->cblist))
                        rcu_segcblist_init(&rtpcp->cblist);
                INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
                rtpcp->cpu = cpu;
                rtpcp->rtpp = rtp;
                if (!rtpcp->rtp_blkd_tasks.next)
                        INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
                if (!rtpcp->rtp_exit_list.next)
                        INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
        }

        pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
                data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
}

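/*
 * Worked example of the shift/limit computation above (illustrative only):
 * with nr_cpu_ids = 6 and an enqueue limit of lim = 4, ilog2(6 / 4) = 0,
 * but (6 - 1) >> 0 = 5 >= 4, so the shift is bumped to 1.  Every CPU then
 * maps to queue (cpu >> 1), i.e. queues 0-2, all below lim:
 *
 *	static int example_queue_for_cpu(int cpu, int nr_cpu_ids, int lim)
 *	{
 *		int shift = ilog2(nr_cpu_ids / lim);
 *
 *		if (((nr_cpu_ids - 1) >> shift) >= lim)
 *			shift++;
 *		return cpu >> shift;	// Always < lim for cpu < nr_cpu_ids.
 *	}
 */
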
// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{
        return jiffies + rtp->lazy_jiffies;
}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{
        unsigned long flags;
        bool needwake = false;
        struct rcu_tasks *rtp;
        struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);

        rtp = rtpcp->rtpp;
        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
        if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
                if (!rtpcp->urgent_gp)
                        rtpcp->urgent_gp = 1;
                needwake = true;
                mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
        }
        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
        if (needwake)
                rcuwait_wake_up(&rtp->cbs_wait);
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
        struct rcu_tasks *rtp;
        struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

        rtp = rtpcp->rtpp;
        rcuwait_wake_up(&rtp->cbs_wait);
}

320// Enqueue a callback for the specified flavor of Tasks RCU.
321static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
322 struct rcu_tasks *rtp)
eacd6f04 323{
07d95c34 324 int chosen_cpu;
eacd6f04 325 unsigned long flags;
d119357d 326 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
07d95c34 327 int ideal_cpu;
7d13d30b 328 unsigned long j;
ab97152f 329 bool needadjust = false;
eacd6f04 330 bool needwake;
cafafd67 331 struct rcu_tasks_percpu *rtpcp;
332
333 rhp->next = NULL;
334 rhp->func = func;
cafafd67 335 local_irq_save(flags);
fd796e41 336 rcu_read_lock();
337 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
338 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
339 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
340 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
341 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
342 j = jiffies;
343 if (rtpcp->rtp_jiffies != j) {
344 rtpcp->rtp_jiffies = j;
345 rtpcp->rtp_n_lock_retries = 0;
346 }
347 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
348 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
349 needadjust = true; // Defer adjustment to avoid deadlock.
7d13d30b 350 }
351 // Queuing callbacks before initialization not yet supported.
352 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
353 rcu_segcblist_init(&rtpcp->cblist);
354 needwake = (func == wakeme_after_rcu) ||
355 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
356 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
357 if (rtp->lazy_jiffies)
358 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
359 else
360 needwake = rcu_segcblist_empty(&rtpcp->cblist);
cafafd67 361 }
362 if (needwake)
363 rtpcp->urgent_gp = 3;
9b073de1 364 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
381a4f3b 365 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
366 if (unlikely(needadjust)) {
367 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
368 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
00a8b4b5 369 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
fd796e41 370 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
371 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
372 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
373 }
374 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
375 }
fd796e41 376 rcu_read_unlock();
eacd6f04 377 /* We can't create the thread unless interrupts are enabled. */
07e10515 378 if (needwake && READ_ONCE(rtp->kthread_ptr))
3063b33a 379 irq_work_queue(&rtpcp->rtp_irq_work);
eacd6f04 380}
eacd6f04 381
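/*
 * Illustrative sketch of the queue-selection step in call_rcu_tasks_generic()
 * above (not part of the implementation): the enqueuing CPU is shifted down
 * to one of the percpu_enqueue_lim shards, then cpumask_next() rounds that
 * shard index up to a CPU that actually exists, which matters when CPU
 * numbering is sparse:
 *
 *	// Hypothetical helper, for illustration only.
 *	static int example_chosen_cpu(struct rcu_tasks *rtp)
 *	{
 *		int ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
 *
 *		// cpumask_next(n, mask) returns the first set bit after n,
 *		// so passing ideal_cpu - 1 means "ideal_cpu itself, or the
 *		// next possible CPU after it".
 *		return cpumask_next(ideal_cpu - 1, cpu_possible_mask);
 *	}
 *
 * With percpu_enqueue_shift at its boot-time value of
 * order_base_2(CONFIG_NR_CPUS), ideal_cpu is always 0, so every callback
 * funnels to CPU 0's queue until contention switches the flavor over to
 * per-CPU queuing.
 */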
382// RCU callback function for rcu_barrier_tasks_generic().
383static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
384{
385 struct rcu_tasks *rtp;
386 struct rcu_tasks_percpu *rtpcp;
387
388 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
389 rtp = rtpcp->rtpp;
390 if (atomic_dec_and_test(&rtp->barrier_q_count))
391 complete(&rtp->barrier_q_completion);
392}
393
394// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
395// Operates in a manner similar to rcu_barrier().
396static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
397{
398 int cpu;
399 unsigned long flags;
400 struct rcu_tasks_percpu *rtpcp;
401 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
402
403 mutex_lock(&rtp->barrier_q_mutex);
404 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
405 smp_mb();
406 mutex_unlock(&rtp->barrier_q_mutex);
407 return;
408 }
409 rcu_seq_start(&rtp->barrier_q_seq);
410 init_completion(&rtp->barrier_q_completion);
411 atomic_set(&rtp->barrier_q_count, 2);
412 for_each_possible_cpu(cpu) {
2cee0789 413 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
414 break;
415 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
416 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
417 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
418 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
419 atomic_inc(&rtp->barrier_q_count);
420 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
421 }
422 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
423 complete(&rtp->barrier_q_completion);
424 wait_for_completion(&rtp->barrier_q_completion);
425 rcu_seq_end(&rtp->barrier_q_seq);
426 mutex_unlock(&rtp->barrier_q_mutex);
427}
428
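/*
 * Illustrative sketch of the ->barrier_q_seq idiom used above (not part of
 * the implementation): rcu_seq_snap() returns a cookie naming a barrier
 * operation that has not yet started, and rcu_seq_done() reports whether the
 * counter has advanced past that cookie, so a late-arriving caller can
 * piggyback on a concurrent rcu_barrier_tasks() instead of queuing its own
 * callbacks:
 *
 *	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
 *
 *	mutex_lock(&rtp->barrier_q_mutex);
 *	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
 *		// Another caller's barrier fully covered ours; the smp_mb()
 *		// supplies the ordering that the skipped
 *		// wait_for_completion() would otherwise have provided.
 *		smp_mb();
 *		mutex_unlock(&rtp->barrier_q_mutex);
 *		return;
 *	}
 */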
429// Advance callbacks and indicate whether either a grace period or
430// callback invocation is needed.
431static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
432{
433 int cpu;
e62d8ae4 434 int dequeue_limit;
4d1114c0 435 unsigned long flags;
a4fcfbee 436 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
437 long n;
438 long ncbs = 0;
439 long ncbsnz = 0;
440 int needgpcb = 0;
441
442 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
443 for (cpu = 0; cpu < dequeue_limit; cpu++) {
444 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
445
446 /* Advance and accelerate any new callbacks. */
fd796e41 447 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
448 continue;
449 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
450 // Should we shrink down to a single callback queue?
451 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
452 if (n) {
453 ncbs += n;
454 if (cpu > 0)
455 ncbsnz += n;
456 }
457 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
458 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
459 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
460 if (rtp->lazy_jiffies)
461 rtpcp->urgent_gp--;
4d1114c0 462 needgpcb |= 0x3;
463 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
464 rtpcp->urgent_gp = 0;
465 }
466 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
467 needgpcb |= 0x1;
468 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
469 }
470
471 // Shrink down to a single callback queue if appropriate.
472 // This is done in two stages: (1) If there are no more than
473 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
474 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
475 // if there has not been an increase in callbacks, limit dequeuing
476 // to CPU 0. Note the matching RCU read-side critical section in
477 // call_rcu_tasks_generic().
478 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
479 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
480 if (rtp->percpu_enqueue_lim > 1) {
2bcd18e0 481 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
482 smp_store_release(&rtp->percpu_enqueue_lim, 1);
483 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
a4fcfbee 484 gpdone = false;
485 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
486 }
487 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
488 }
a4fcfbee 489 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
490 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
491 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
492 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
493 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
494 }
495 if (rtp->percpu_dequeue_lim == 1) {
496 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
497 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
4cf0585c 498
499 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
500 }
4cf0585c 501 }
502 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
503 }
504
505 return needgpcb;
506}
507
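/*
 * Illustrative note (not part of the implementation): the value returned by
 * rcu_tasks_need_gpcb() is a two-bit mask rather than a boolean.  Bit 0x1
 * reports callbacks that can be processed, which by itself is enough to wake
 * the grace-period kthread out of its rcuwait; bit 0x2 reports pending
 * callbacks that need a new grace period.  The caller acts on them as in
 * rcu_tasks_one_gp():
 *
 *	if (needgpcb & 0x2) {
 *		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
 *		rcu_seq_start(&rtp->tasks_gp_seq);
 *		rtp->gp_func(rtp);
 *		rcu_seq_end(&rtp->tasks_gp_seq);
 *	}
 *	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
 *	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
 */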
57881863 508// Advance callbacks and invoke any that are ready.
d363f833 509static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
eacd6f04 510{
57881863 511 int cpu;
d363f833 512 int cpunext;
401b0de3 513 int cpuwq;
eacd6f04 514 unsigned long flags;
9b073de1 515 int len;
9b073de1 516 struct rcu_head *rhp;
517 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
518 struct rcu_tasks_percpu *rtpcp_next;
519
520 cpu = rtpcp->cpu;
521 cpunext = cpu * 2 + 1;
2cee0789 522 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
d363f833 523 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
524 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
525 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
d363f833 526 cpunext++;
2cee0789 527 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
d363f833 528 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
529 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
530 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
57881863 531 }
57881863 532 }
d363f833 533
ab2756ea 534 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
535 return;
536 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
537 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
538 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
539 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
540 len = rcl.len;
541 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2cbc482d 542 debug_rcu_head_callback(rhp);
543 local_bh_disable();
544 rhp->func(rhp);
545 local_bh_enable();
546 cond_resched();
547 }
548 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
549 rcu_segcblist_add_len(&rtpcp->cblist, -len);
550 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
551 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
552}
553
554// Workqueue flood to advance callbacks and invoke any that are ready.
555static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
556{
557 struct rcu_tasks *rtp;
558 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
559
560 rtp = rtpcp->rtpp;
561 rcu_tasks_invoke_cbs(rtp, rtpcp);
562}
563
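/*
 * Illustrative note (not part of the implementation): rcu_tasks_invoke_cbs()
 * fans callback invocation out across CPUs as an implicit binary tree.  The
 * grace-period kthread starts at CPU 0, and each invocation queues work for
 * CPUs 2*cpu+1 and 2*cpu+2 before processing its own list, so with eight
 * in-use queues the work spreads as:
 *
 *	0 -> 1, 2
 *	1 -> 3, 4	2 -> 5, 6
 *	3 -> 7
 *
 * giving logarithmic sequential queue_work_on() latency instead of CPU 0
 * walking all of the queues itself.
 */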
d96225fd 564// Wait for one grace period.
4a8cc433 565static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
566{
567 int needgpcb;
568
569 mutex_lock(&rtp->tasks_gp_mutex);
570
571 // If there were none, wait a bit and start over.
572 if (unlikely(midboot)) {
573 needgpcb = 0x2;
574 } else {
9d0cce2b 575 mutex_unlock(&rtp->tasks_gp_mutex);
576 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
577 rcuwait_wait_event(&rtp->cbs_wait,
578 (needgpcb = rcu_tasks_need_gpcb(rtp)),
579 TASK_IDLE);
9d0cce2b 580 mutex_lock(&rtp->tasks_gp_mutex);
4a8cc433 581 }
582
583 if (needgpcb & 0x2) {
584 // Wait for one grace period.
585 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
586 rtp->gp_start = jiffies;
587 rcu_seq_start(&rtp->tasks_gp_seq);
588 rtp->gp_func(rtp);
589 rcu_seq_end(&rtp->tasks_gp_seq);
590 }
591
592 // Invoke callbacks.
593 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
594 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
595 mutex_unlock(&rtp->tasks_gp_mutex);
596}
597
598// RCU-tasks kthread that detects grace periods and invokes callbacks.
599static int __noreturn rcu_tasks_kthread(void *arg)
600{
d119357d 601 int cpu;
07e10515 602 struct rcu_tasks *rtp = arg;
eacd6f04 603
604 for_each_possible_cpu(cpu) {
605 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
606
607 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
608 rtpcp->urgent_gp = 1;
609 }
610
eacd6f04 611 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
04d4e665 612 housekeeping_affine(current, HK_TYPE_RCU);
d119357d 613 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
614
615 /*
616 * Each pass through the following loop makes one check for
617 * newly arrived callbacks, and, if there are some, waits for
618 * one RCU-tasks grace period and then invokes the callbacks.
619 * This loop is terminated by the system going down. ;-)
620 */
621 for (;;) {
622 // Wait for one grace period and invoke any callbacks
623 // that are ready.
4a8cc433 624 rcu_tasks_one_gp(rtp, false);
57881863 625
d96225fd 626 // Paranoid sleep to keep this from entering a tight loop.
4fe192df 627 schedule_timeout_idle(rtp->gp_sleep);
628 }
629}
630
631// Wait for a grace period for the specified flavor of Tasks RCU.
632static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
633{
634 /* Complain if the scheduler has not started. */
635 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
636 "synchronize_%s() called too soon", rtp->name))
637 return;
68cb4720 638
639 // If the grace-period kthread is running, use it.
640 if (READ_ONCE(rtp->kthread_ptr)) {
641 wait_rcu_gp(rtp->call_func);
642 return;
643 }
644 rcu_tasks_one_gp(rtp, true);
645}
646
1b04fa99 647/* Spawn RCU-tasks grace-period kthread. */
5873b8a9 648static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
649{
650 struct task_struct *t;
651
652 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
653 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
5873b8a9 654 return;
eacd6f04 655 smp_mb(); /* Ensure others see full kthread. */
eacd6f04 656}
eacd6f04 657
658#ifndef CONFIG_TINY_RCU
659
660/*
661 * Print any non-default Tasks RCU settings.
662 */
663static void __init rcu_tasks_bootup_oddness(void)
664{
d5f177d3 665#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
666 int rtsimc;
667
668 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
669 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
670 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
671 if (rtsimc != rcu_task_stall_info_mult) {
672 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
673 rcu_task_stall_info_mult = rtsimc;
674 }
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
676#ifdef CONFIG_TASKS_RCU
677 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
eacd6f04 678#endif /* #ifdef CONFIG_TASKS_RCU */
679#ifdef CONFIG_TASKS_RUDE_RCU
680 pr_info("\tRude variant of Tasks RCU enabled.\n");
681#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
682#ifdef CONFIG_TASKS_TRACE_RCU
683 pr_info("\tTracing variant of Tasks RCU enabled.\n");
684#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
685}
686
687#endif /* #ifndef CONFIG_TINY_RCU */
5873b8a9 688
8344496e 689#ifndef CONFIG_TINY_RCU
690/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
691static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
692{
693 int cpu;
694 bool havecbs = false;
695 bool haveurgent = false;
696 bool haveurgentcbs = false;
697
698 for_each_possible_cpu(cpu) {
699 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
700
d119357d 701 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
10b3742f 702 havecbs = true;
703 if (data_race(rtpcp->urgent_gp))
704 haveurgent = true;
705 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
706 haveurgentcbs = true;
707 if (havecbs && haveurgent && haveurgentcbs)
10b3742f 708 break;
10b3742f 709 }
d119357d 710 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
e21408ce 711 rtp->kname,
7e0669c3 712 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
af051ca4 713 jiffies - data_race(rtp->gp_jiffies),
b14fb4fb 714 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
7e0669c3 715 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
e21408ce 716 ".k"[!!data_race(rtp->kthread_ptr)],
10b3742f 717 ".C"[havecbs],
718 ".u"[haveurgent],
719 ".U"[haveurgentcbs],
720 rtp->lazy_jiffies,
721 s);
722}
27c0f144 723#endif // #ifndef CONFIG_TINY_RCU
e21408ce 724
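/*
 * Illustrative note (not part of the implementation): the ".k"/".C"/".u"/".U"
 * expressions above index into two-character string literals, a common RCU
 * status-line idiom: "X"[cond] evaluates to '.' when cond is 0 and to 'X'
 * when cond is 1.  A line such as the following hypothetical example
 *
 *	rcu_tasks: RTGS_WAIT_CBS(11) since 302 g:290 i:0/0 .C.. l:25
 *
 * (fields: kthread name, state name and number, jiffies in that state,
 * grace-period sequence, IPI failures/IPIs, the four flag characters, and
 * the lazy-callback jiffies) would describe a flavor whose kthread has not
 * yet started but which already has callbacks queued and nothing urgent.
 */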
725static void exit_tasks_rcu_finish_trace(struct task_struct *t);
726
727#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
5873b8a9 728
729////////////////////////////////////////////////////////////////////////
730//
731// Shared code between task-list-scanning variants of Tasks RCU.
732
733/* Wait for one RCU-tasks grace period. */
734static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
735{
f2539003 736 struct task_struct *g;
d01aa263 737 int fract;
738 LIST_HEAD(holdouts);
739 unsigned long j;
740 unsigned long lastinfo;
741 unsigned long lastreport;
742 bool reported = false;
743 int rtsi;
744 struct task_struct *t;
d01aa263 745
af051ca4 746 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
7460ade1 747 rtp->pregp_func(&holdouts);
748
749 /*
750 * There were callbacks, so we need to wait for an RCU-tasks
751 * grace period. Start off by scanning the task list for tasks
752 * that are not already voluntarily blocked. Mark these tasks
753 * and make a list of them in holdouts.
754 */
af051ca4 755 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
756 if (rtp->pertask_func) {
757 rcu_read_lock();
758 for_each_process_thread(g, t)
759 rtp->pertask_func(t, &holdouts);
760 rcu_read_unlock();
761 }
d01aa263 762
af051ca4 763 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
9796e1ae 764 rtp->postscan_func(&holdouts);
765
766 /*
767 * Each pass through the following loop scans the list of holdout
768 * tasks, removing any that are no longer holdouts. When the list
769 * is empty, we are done.
770 */
771 lastreport = jiffies;
772 lastinfo = lastreport;
773 rtsi = READ_ONCE(rcu_task_stall_info);
d01aa263 774
775 // Start off with initial wait and slowly back off to 1 HZ wait.
776 fract = rtp->init_fract;
d01aa263 777
77dc1741 778 while (!list_empty(&holdouts)) {
777570d9 779 ktime_t exp;
780 bool firstreport;
781 bool needreport;
782 int rtst;
783
f2539003 784 // Slowly back off waiting for holdouts
af051ca4 785 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
786 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
787 schedule_timeout_idle(fract);
788 } else {
789 exp = jiffies_to_nsecs(fract);
790 __set_current_state(TASK_IDLE);
791 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
792 }
d01aa263 793
794 if (fract < HZ)
795 fract++;
796
797 rtst = READ_ONCE(rcu_task_stall_timeout);
798 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
f2539003 799 if (needreport) {
d01aa263 800 lastreport = jiffies;
801 reported = true;
802 }
803 firstreport = true;
804 WARN_ON(signal_pending(current));
af051ca4 805 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
d01aa263 806 rtp->holdouts_func(&holdouts, needreport, &firstreport);
807
808 // Print pre-stall informational messages if needed.
809 j = jiffies;
810 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
811 lastinfo = j;
812 rtsi = rtsi * rcu_task_stall_info_mult;
df83fff7 813 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
814 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
815 }
816 }
817
818 set_tasks_gp_state(rtp, RTGS_POST_GP);
819 rtp->postgp_func(rtp);
820}
821
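/*
 * Illustrative note (not part of the implementation): the holdout-scan loop
 * above polls with a linearly growing delay rather than a fixed period.  The
 * first wait is rtp->init_fract jiffies (HZ/10 for RCU Tasks), each later
 * pass adds one jiffy up to a ceiling of HZ, and every time a pre-stall
 * informational message is printed its interval is multiplied by
 * rcu_task_stall_info_mult, so a long-stalled grace period settles down to
 * roughly one scan per second with increasingly rare messages.  Sketch, with
 * scan_holdouts() standing in for the ->holdouts_func() call:
 *
 *	fract = rtp->init_fract;		// e.g. 100 jiffies at HZ=1000.
 *	while (!list_empty(&holdouts)) {
 *		schedule_timeout_idle(fract);	// 100, 101, ... capped at HZ.
 *		if (fract < HZ)
 *			fract++;
 *		scan_holdouts(&holdouts);	// Hypothetical helper.
 *	}
 */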
822#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
823
824#ifdef CONFIG_TASKS_RCU
825
826////////////////////////////////////////////////////////////////////////
827//
828// Simple variant of RCU whose quiescent states are voluntary context
8af9e2c7 829// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
5873b8a9
PM
830// As such, grace periods can take one good long time. There are no
831// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
832// because this implementation is intended to get the system into a safe
833// state for some of the manipulations involved in tracing and the like.
834// Finally, this implementation does not support high call_rcu_tasks()
835// rates from multiple CPUs. If this is required, per-CPU callback lists
836// will be needed.
06a3ec92
PM
837//
838// The implementation uses rcu_tasks_wait_gp(), which relies on function
839// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
840// function sets these function pointers up so that rcu_tasks_wait_gp()
841// invokes these functions in this order:
842//
843// rcu_tasks_pregp_step():
844// Invokes synchronize_rcu() in order to wait for all in-flight
845// t->on_rq and t->nvcsw transitions to complete. This works because
846// all such transitions are carried out with interrupts disabled.
847// rcu_tasks_pertask(), invoked on every non-idle task:
848// For every runnable non-idle task other than the current one, use
849// get_task_struct() to pin down that task, snapshot that task's
850// number of voluntary context switches, and add that task to the
851// holdout list.
852// rcu_tasks_postscan():
1612160b
PM
853// Gather per-CPU lists of tasks in do_exit() to ensure that all
854// tasks that were in the process of exiting (and which thus might
855// not know to synchronize with this RCU Tasks grace period) have
856// completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
857// will take care of any tasks stuck in the non-preemptible region
858// of do_exit() following its call to exit_tasks_rcu_stop().
06a3ec92
PM
859// check_all_holdout_tasks(), repeatedly until holdout list is empty:
860// Scans the holdout list, attempting to identify a quiescent state
861// for each task on the list. If there is a quiescent state, the
862// corresponding task is removed from the holdout list.
863// rcu_tasks_postgp():
864// Invokes synchronize_rcu() in order to ensure that all prior
865// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
866// to have happened before the end of this RCU Tasks grace period.
867// Again, this works because all such transitions are carried out
868// with interrupts disabled.
869//
870// For each exiting task, the exit_tasks_rcu_start() and
1612160b
PM
871// exit_tasks_rcu_finish() functions add and remove, respectively, the
872// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
873// wait on. This is necessary because rcu_tasks_postscan() must wait on
874// tasks that have already been removed from the global list of tasks.
06a3ec92 875//
381a4f3b
PM
876// Pre-grace-period update-side code is ordered before the grace
877// via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
878// is ordered before the grace period via synchronize_rcu() call in
879// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
06a3ec92 880// disabling.
5873b8a9 881
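/*
 * For reference, the wiring described above is performed at boot by
 * rcu_spawn_tasks_kthread() later in this file, which fills in the
 * rcu_tasks_wait_gp() hook functions for this flavor before spawning the
 * grace-period kthread:
 *
 *	rcu_tasks.gp_sleep = HZ / 10;
 *	rcu_tasks.init_fract = HZ / 10;
 *	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
 *	rcu_tasks.pertask_func = rcu_tasks_pertask;
 *	rcu_tasks.postscan_func = rcu_tasks_postscan;
 *	rcu_tasks.holdouts_func = check_all_holdout_tasks;
 *	rcu_tasks.postgp_func = rcu_tasks_postgp;
 *	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
 */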
e4fe5dd6 882/* Pre-grace-period preparation. */
7460ade1 883static void rcu_tasks_pregp_step(struct list_head *hop)
e4fe5dd6
PM
884{
885 /*
886 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
887 * to complete. Invoking synchronize_rcu() suffices because all
888 * these transitions occur with interrupts disabled. Without this
889 * synchronize_rcu(), a read-side critical section that started
890 * before the grace period might be incorrectly seen as having
891 * started after the grace period.
892 *
893 * This synchronize_rcu() also dispenses with the need for a
894 * memory barrier on the first store to t->rcu_tasks_holdout,
895 * as it forces the store to happen after the beginning of the
896 * grace period.
897 */
898 synchronize_rcu();
899}
900
9715ed50
FW
901/* Check for quiescent states since the pregp's synchronize_rcu() */
902static bool rcu_tasks_is_holdout(struct task_struct *t)
903{
904 int cpu;
905
906 /* Has the task been seen voluntarily sleeping? */
907 if (!READ_ONCE(t->on_rq))
908 return false;
909
910 /*
911 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
912 * quiescent states. But CPU boot code performed by the idle task
913 * isn't a quiescent state.
914 */
915 if (is_idle_task(t))
916 return false;
917
918 cpu = task_cpu(t);
919
920 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
921 if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
922 return false;
923
924 return true;
925}
926
e4fe5dd6
PM
927/* Per-task initial processing. */
928static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
929{
9715ed50 930 if (t != current && rcu_tasks_is_holdout(t)) {
e4fe5dd6
PM
931 get_task_struct(t);
932 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
933 WRITE_ONCE(t->rcu_tasks_holdout, true);
934 list_add(&t->rcu_tasks_holdout_list, hop);
935 }
936}
937
1612160b
PM
938void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
939DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
940
e4fe5dd6 941/* Processing between scanning taskslist and draining the holdout list. */
04a3c5aa 942static void rcu_tasks_postscan(struct list_head *hop)
e4fe5dd6 943{
1612160b 944 int cpu;
a4533cc0
NU
945 int rtsi = READ_ONCE(rcu_task_stall_info);
946
947 if (!IS_ENABLED(CONFIG_TINY_RCU)) {
948 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
949 add_timer(&tasks_rcu_exit_srcu_stall_timer);
950 }
951
e4fe5dd6 952 /*
e4e1e808
FW
953 * Exiting tasks may escape the tasklist scan. Those are vulnerable
954 * until their final schedule() with TASK_DEAD state. To cope with
955 * this, divide the fragile exit path part in two intersecting
956 * read side critical sections:
957 *
1612160b
PM
958 * 1) A task_struct list addition before calling exit_notify(),
959 * which may remove the task from the tasklist, with the
960 * removal after the final preempt_disable() call in do_exit().
e4e1e808
FW
961 *
962 * 2) An _RCU_ read side starting with the final preempt_disable()
963 * call in do_exit() and ending with the final call to schedule()
964 * with TASK_DEAD state.
965 *
966 * This handles the part 1). And postgp will handle part 2) with a
967 * call to synchronize_rcu().
e4fe5dd6 968 */
1612160b
PM
969
970 for_each_possible_cpu(cpu) {
0bb11a37 971 unsigned long j = jiffies + 1;
1612160b
PM
972 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
973 struct task_struct *t;
0bb11a37
PM
974 struct task_struct *t1;
975 struct list_head tmp;
1612160b
PM
976
977 raw_spin_lock_irq_rcu_node(rtpcp);
0bb11a37 978 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
1612160b
PM
979 if (list_empty(&t->rcu_tasks_holdout_list))
980 rcu_tasks_pertask(t, hop);
0bb11a37
PM
981
982 // RT kernels need frequent pauses, otherwise
983 // pause at least once per pair of jiffies.
984 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
985 continue;
986
987 // Keep our place in the list while pausing.
988 // Nothing else traverses this list, so adding a
989 // bare list_head is OK.
990 list_add(&tmp, &t->rcu_tasks_exit_list);
991 raw_spin_unlock_irq_rcu_node(rtpcp);
992 cond_resched(); // For CONFIG_PREEMPT=n kernels
993 raw_spin_lock_irq_rcu_node(rtpcp);
994 t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
995 list_del(&tmp);
996 j = jiffies + 1;
997 }
1612160b
PM
998 raw_spin_unlock_irq_rcu_node(rtpcp);
999 }
a4533cc0
NU
1000
1001 if (!IS_ENABLED(CONFIG_TINY_RCU))
1002 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
e4fe5dd6
PM
1003}
1004
5873b8a9
PM
1005/* See if tasks are still holding out, complain if so. */
1006static void check_holdout_task(struct task_struct *t,
1007 bool needreport, bool *firstreport)
1008{
1009 int cpu;
1010
1011 if (!READ_ONCE(t->rcu_tasks_holdout) ||
1012 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
9715ed50 1013 !rcu_tasks_is_holdout(t) ||
5873b8a9 1014 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
18966f7b 1015 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
5873b8a9
PM
1016 WRITE_ONCE(t->rcu_tasks_holdout, false);
1017 list_del_init(&t->rcu_tasks_holdout_list);
1018 put_task_struct(t);
1019 return;
1020 }
1021 rcu_request_urgent_qs_task(t);
1022 if (!needreport)
1023 return;
1024 if (*firstreport) {
1025 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1026 *firstreport = false;
1027 }
1028 cpu = task_cpu(t);
1029 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1030 t, ".I"[is_idle_task(t)],
1031 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1032 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
18966f7b 1033 data_race(t->rcu_tasks_idle_cpu), cpu);
5873b8a9
PM
1034 sched_show_task(t);
1035}
1036
e4fe5dd6
PM
1037/* Scan the holdout lists for tasks no longer holding out. */
1038static void check_all_holdout_tasks(struct list_head *hop,
1039 bool needreport, bool *firstreport)
1040{
1041 struct task_struct *t, *t1;
1042
1043 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1044 check_holdout_task(t, needreport, firstreport);
1045 cond_resched();
1046 }
1047}
1048
1049/* Finish off the Tasks-RCU grace period. */
af051ca4 1050static void rcu_tasks_postgp(struct rcu_tasks *rtp)
e4fe5dd6
PM
1051{
1052 /*
1053 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
1054 * memory barriers prior to them in the schedule() path, memory
1055 * reordering on other CPUs could cause their RCU-tasks read-side
1056 * critical sections to extend past the end of the grace period.
1057 * However, because these ->nvcsw updates are carried out with
1058 * interrupts disabled, we can use synchronize_rcu() to force the
1059 * needed ordering on all such CPUs.
1060 *
1061 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1062 * accesses to be within the grace period, avoiding the need for
1063 * memory barriers for ->rcu_tasks_holdout accesses.
1064 *
1065 * In addition, this synchronize_rcu() waits for exiting tasks
1066 * to complete their final preempt_disable() region of execution,
e4e1e808
FW
1067 * enforcing the whole region before tasklist removal until
1068 * the final schedule() with TASK_DEAD state to be an RCU TASKS
1069 * read side critical section.
e4fe5dd6
PM
1070 */
1071 synchronize_rcu();
1072}
1073
a4533cc0
NU
1074static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1075{
1076#ifndef CONFIG_TINY_RCU
1077 int rtsi;
1078
1079 rtsi = READ_ONCE(rcu_task_stall_info);
1080 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1081 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1082 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1083 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1084 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1085 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1086#endif // #ifndef CONFIG_TINY_RCU
1087}
1088
5873b8a9
PM
1089/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1091 * @rhp: structure to be used for queueing the RCU updates.
1092 * @func: actual callback function to be invoked after the grace period
1093 *
1094 * The callback function will be invoked some time after a full grace
1095 * period elapses, in other words after all currently executing RCU
1096 * read-side critical sections have completed. call_rcu_tasks() assumes
1097 * that the read-side critical sections end at a voluntary context
8af9e2c7 1098 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
5873b8a9
PM
1099 * or transition to usermode execution. As such, there are no read-side
1100 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1101 * this primitive is intended to determine that all tasks have passed
a616aec9 1102 * through a safe state, not so much for data-structure synchronization.
5873b8a9
PM
1103 *
1104 * See the description of call_rcu() for more detailed information on
1105 * memory ordering guarantees.
1106 */
1107void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1108{
1109 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1110}
1111EXPORT_SYMBOL_GPL(call_rcu_tasks);
1112
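/*
 * Usage sketch (illustrative only; names are hypothetical): the caller embeds
 * an rcu_head in the structure to be freed and supplies a callback that runs
 * once every task has passed through a voluntary context switch, user-space
 * execution, or the idle loop:
 *
 *	struct my_trampoline {
 *		void *text;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_trampoline_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp = container_of(rhp, struct my_trampoline, rh);
 *
 *		kfree(tp->text);
 *		kfree(tp);
 *	}
 *
 *	// After making the trampoline unreachable for new callers:
 *	call_rcu_tasks(&tp->rh, my_trampoline_free_cb);
 */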
1113/**
1114 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1115 *
1116 * Control will return to the caller some time after a full rcu-tasks
1117 * grace period has elapsed, in other words after all currently
1118 * executing rcu-tasks read-side critical sections have elapsed. These
1119 * read-side critical sections are delimited by calls to schedule(),
1120 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1121 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1122 *
1123 * This is a very specialized primitive, intended only for a few uses in
1124 * tracing and other situations requiring manipulation of function
1125 * preambles and profiling hooks. The synchronize_rcu_tasks() function
1126 * is not (yet) intended for heavy use from multiple CPUs.
1127 *
1128 * See the description of synchronize_rcu() for more detailed information
1129 * on memory ordering guarantees.
1130 */
1131void synchronize_rcu_tasks(void)
1132{
1133 synchronize_rcu_tasks_generic(&rcu_tasks);
1134}
1135EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
1136
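/*
 * Usage sketch (illustrative only; names are hypothetical): the synchronous
 * form is typically used when tearing down a tracing trampoline, where the
 * caller can sleep and wants to free the memory directly:
 *
 *	unregister_my_handler(tp);	// No new entries into tp->text.
 *	synchronize_rcu_tasks();	// Wait out tasks still in, or preempted in, tp->text.
 *	kfree(tp->text);
 *
 * Unlike call_rcu_tasks(), this blocks for a full Tasks RCU grace period,
 * which can take many seconds on a busy system, so it must not be called
 * from atomic context or while holding locks that the affected tasks might
 * need in order to reach a voluntary context switch.
 */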
1137/**
1138 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1139 *
1140 * Although the current implementation is guaranteed to wait, it is not
1141 * obligated to, for example, if there are no pending callbacks.
1142 */
1143void rcu_barrier_tasks(void)
1144{
ce9b1c66 1145 rcu_barrier_tasks_generic(&rcu_tasks);
5873b8a9
PM
1146}
1147EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
1148
0325e8a1 1149static int rcu_tasks_lazy_ms = -1;
450d461a
PM
1150module_param(rcu_tasks_lazy_ms, int, 0444);
1151
5873b8a9
PM
1152static int __init rcu_spawn_tasks_kthread(void)
1153{
4fe192df 1154 rcu_tasks.gp_sleep = HZ / 10;
75dc2da5 1155 rcu_tasks.init_fract = HZ / 10;
450d461a
PM
1156 if (rcu_tasks_lazy_ms >= 0)
1157 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
e4fe5dd6
PM
1158 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1159 rcu_tasks.pertask_func = rcu_tasks_pertask;
1160 rcu_tasks.postscan_func = rcu_tasks_postscan;
1161 rcu_tasks.holdouts_func = check_all_holdout_tasks;
1162 rcu_tasks.postgp_func = rcu_tasks_postgp;
5873b8a9
PM
1163 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1164 return 0;
1165}
5873b8a9 1166
27c0f144
PM
1167#if !defined(CONFIG_TINY_RCU)
1168void show_rcu_tasks_classic_gp_kthread(void)
e21408ce
PM
1169{
1170 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1171}
27c0f144
PM
1172EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1173#endif // !defined(CONFIG_TINY_RCU)
e21408ce 1174
271a8467
PM
1175struct task_struct *get_rcu_tasks_gp_kthread(void)
1176{
1177 return rcu_tasks.kthread_ptr;
1178}
1179EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1180
e4e1e808 1181/*
6b70399f
PM
1182 * Protect against tasklist scan blind spot while the task is exiting and
1183 * may be removed from the tasklist. Do this by adding the task to yet
1184 * another list.
1185 *
1186 * Note that the task will remove itself from this list, so there is no
1187 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
1188 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
1189 * the needed get_task_struct().
e4e1e808 1190 */
6b70399f 1191void exit_tasks_rcu_start(void)
25246fc8 1192{
6b70399f
PM
1193 unsigned long flags;
1194 struct rcu_tasks_percpu *rtpcp;
1195 struct task_struct *t = current;
1196
1197 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
1198 preempt_disable();
1199 rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
1200 t->rcu_tasks_exit_cpu = smp_processor_id();
1201 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1202 if (!rtpcp->rtp_exit_list.next)
1203 INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
1204 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
1205 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1206 preempt_enable();
25246fc8
PM
1207}
1208
e4e1e808 1209/*
6b70399f
PM
1210 * Remove the task from the "yet another list" because do_exit() is now
1211 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
e4e1e808 1212 */
6b70399f 1213void exit_tasks_rcu_stop(void)
25246fc8 1214{
6b70399f
PM
1215 unsigned long flags;
1216 struct rcu_tasks_percpu *rtpcp;
25246fc8
PM
1217 struct task_struct *t = current;
1218
6b70399f
PM
1219 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
1220 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
1221 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1222 list_del_init(&t->rcu_tasks_exit_list);
1223 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
28319d6d
FW
1224}
1225
1226/*
1227 * Contribute to protect against tasklist scan blind spot while the
1228 * task is exiting and may be removed from the tasklist. See
1229 * corresponding synchronize_srcu() for further details.
1230 */
1231void exit_tasks_rcu_finish(void)
1232{
1233 exit_tasks_rcu_stop();
1234 exit_tasks_rcu_finish_trace(current);
25246fc8
PM
1235}
1236
e21408ce 1237#else /* #ifdef CONFIG_TASKS_RCU */
25246fc8 1238void exit_tasks_rcu_start(void) { }
28319d6d 1239void exit_tasks_rcu_stop(void) { }
25246fc8 1240void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
e21408ce 1241#endif /* #else #ifdef CONFIG_TASKS_RCU */
c84aad76
PM
1242
1243#ifdef CONFIG_TASKS_RUDE_RCU
1244
1245////////////////////////////////////////////////////////////////////////
1246//
1247// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1248// passing an empty function to schedule_on_each_cpu(). This approach
e4be1f44
PM
1249// provides an asynchronous call_rcu_tasks_rude() API and batching of
1250// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
9fc98e31
PM
1251// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1252// and induces otherwise unnecessary context switches on all online CPUs,
1253// whether idle or not.
1254//
1255// Callback handling is provided by the rcu_tasks_kthread() function.
1256//
1257// Ordering is provided by the scheduler's context-switch code.
c84aad76
PM
1258
1259// Empty function to allow workqueues to force a context switch.
1260static void rcu_tasks_be_rude(struct work_struct *work)
1261{
1262}
1263
1264// Wait for one rude RCU-tasks grace period.
1265static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1266{
238dbce3 1267 rtp->n_ipis += cpumask_weight(cpu_online_mask);
c84aad76
PM
1268 schedule_on_each_cpu(rcu_tasks_be_rude);
1269}
1270
1271void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
c97d12a6
PM
1272DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1273 "RCU Tasks Rude");
c84aad76
PM
1274
1275/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1277 * @rhp: structure to be used for queueing the RCU updates.
1278 * @func: actual callback function to be invoked after the grace period
1279 *
1280 * The callback function will be invoked some time after a full grace
1281 * period elapses, in other words after all currently executing RCU
1282 * read-side critical sections have completed. call_rcu_tasks_rude()
1283 * assumes that the read-side critical sections end at context switch,
8af9e2c7 1284 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
a6517e9c
NU
1285 * usermode execution is schedulable). As such, there are no read-side
1286 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1287 * this primitive is intended to determine that all tasks have passed
1288 * through a safe state, not so much for data-structure synchronization.
c84aad76
PM
1289 *
1290 * See the description of call_rcu() for more detailed information on
1291 * memory ordering guarantees.
1292 */
1293void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1294{
1295 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1296}
1297EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1298
1299/**
1300 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1301 *
1302 * Control will return to the caller some time after a rude rcu-tasks
1303 * grace period has elapsed, in other words after all currently
1304 * executing rcu-tasks read-side critical sections have elapsed. These
1305 * read-side critical sections are delimited by calls to schedule(),
a6517e9c
NU
1306 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1307 * context), and (in theory, anyway) cond_resched().
c84aad76
PM
1308 *
1309 * This is a very specialized primitive, intended only for a few uses in
1310 * tracing and other situations requiring manipulation of function preambles
1311 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1312 * (yet) intended for heavy use from multiple CPUs.
1313 *
1314 * See the description of synchronize_rcu() for more detailed information
1315 * on memory ordering guarantees.
1316 */
1317void synchronize_rcu_tasks_rude(void)
1318{
1319 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1320}
1321EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1322
1323/**
1324 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1325 *
1326 * Although the current implementation is guaranteed to wait, it is not
1327 * obligated to, for example, if there are no pending callbacks.
1328 */
1329void rcu_barrier_tasks_rude(void)
1330{
ce9b1c66 1331 rcu_barrier_tasks_generic(&rcu_tasks_rude);
c84aad76
PM
1332}
1333EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1334
450d461a
PM
1335int rcu_tasks_rude_lazy_ms = -1;
1336module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1337
c84aad76
PM
1338static int __init rcu_spawn_tasks_rude_kthread(void)
1339{
4fe192df 1340 rcu_tasks_rude.gp_sleep = HZ / 10;
450d461a
PM
1341 if (rcu_tasks_rude_lazy_ms >= 0)
1342 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
c84aad76
PM
1343 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1344 return 0;
1345}
c84aad76 1346
27c0f144
PM
1347#if !defined(CONFIG_TINY_RCU)
1348void show_rcu_tasks_rude_gp_kthread(void)
e21408ce
PM
1349{
1350 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1351}
27c0f144
PM
1352EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1353#endif // !defined(CONFIG_TINY_RCU)
a15ec57c
PM
1354
1355struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1356{
1357 return rcu_tasks_rude.kthread_ptr;
1358}
1359EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1360
27c0f144 1361#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
d5f177d3
PM
1362
1363////////////////////////////////////////////////////////////////////////
1364//
1365// Tracing variant of Tasks RCU. This variant is designed to be used
1366// to protect tracing hooks, including those of BPF. This variant
1367// therefore:
1368//
1369// 1. Has explicit read-side markers to allow finite grace periods
1370// in the face of in-kernel loops for PREEMPT=n builds.
1371//
1372// 2. Protects code in the idle loop, exception entry/exit, and
1373// CPU-hotplug code paths, similar to the capabilities of SRCU.
1374//
c4f113ac 1375// 3. Avoids expensive read-side instructions, having overhead similar
d5f177d3
PM
1376// to that of Preemptible RCU.
1377//
eea3423b
PM
1378// There are of course downsides. For example, the grace-period code
1379// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1380// in nohz_full userspace. If needed, these downsides can be at least
1381// partially remedied.
d5f177d3
PM
1382//
1383// Perhaps most important, this variant of RCU does not affect the vanilla
1384// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1385// readers can operate from idle, offline, and exception entry/exit in no
1386// way allows rcu_preempt and rcu_sched readers to also do so.
a434dd10
PM
1387//
1388// The implementation uses rcu_tasks_wait_gp(), which relies on function
1389// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1390// function sets these function pointers up so that rcu_tasks_wait_gp()
1391// invokes these functions in this order:
1392//
1393// rcu_tasks_trace_pregp_step():
eea3423b
PM
1394// Disables CPU hotplug, adds all currently executing tasks to the
1395// holdout list, then checks the state of all tasks that blocked
1396// or were preempted within their current RCU Tasks Trace read-side
1397// critical section, adding them to the holdout list if appropriate.
1398// Finally, this function re-enables CPU hotplug.
1399// The ->pertask_func() pointer is NULL, so there is no per-task processing.
a434dd10 1400// rcu_tasks_trace_postscan():
eea3423b
PM
1401// Invokes synchronize_rcu() to wait for late-stage exiting tasks
1402// to finish exiting.
a434dd10
PM
1403// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1404// Scans the holdout list, attempting to identify a quiescent state
1405// for each task on the list. If there is a quiescent state, the
eea3423b
PM
1406// corresponding task is removed from the holdout list. Once this
1407// list is empty, the grace period has completed.
a434dd10 1408// rcu_tasks_trace_postgp():
eea3423b 1409// Provides the needed full memory barrier and does debug checks.
a434dd10
PM
1410//
1411 // The exit_tasks_rcu_finish_trace() function synchronizes with exiting tasks.
1412//
eea3423b
PM
1413// Pre-grace-period update-side code is ordered before the grace period
1414// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1415// read-side code is ordered before the grace period by atomic operations
1416 // on the .b.need_qs flag of each task involved in this process, or by scheduler
1417// context-switch ordering (for locked-down non-running readers).
d5f177d3
PM
1418
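/*
 * Illustrative sketch only (not part of this file): the read-side pattern
 * that the machinery described above supports.  A hypothetical tracing hook
 * is dereferenced and invoked under rcu_read_lock_trace(), so that an updater
 * can wait for a Tasks Trace grace period before freeing the old hook.
 * All "my_*" names are made up; <linux/rcupdate_trace.h> is assumed.
 */
#if 0
struct my_trace_hook {
	void (*func)(unsigned long ip);
	struct rcu_head rh;
};

static struct my_trace_hook __rcu *my_trace_hook_ptr;

static void my_trace_hook_call(unsigned long ip)
{
	struct my_trace_hook *hp;

	rcu_read_lock_trace();
	hp = rcu_dereference_check(my_trace_hook_ptr, rcu_read_lock_trace_held());
	if (hp)
		hp->func(ip);
	rcu_read_unlock_trace();
}
#endif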
1419// The lockdep state must be outside of #ifdef to be useful.
1420#ifdef CONFIG_DEBUG_LOCK_ALLOC
1421static struct lock_class_key rcu_lock_trace_key;
1422struct lockdep_map rcu_trace_lock_map =
1423 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1424EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1425#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1426
1427#ifdef CONFIG_TASKS_TRACE_RCU
1428
d5f177d3
PM
1429// Record outstanding IPIs to each CPU. No point in sending two...
1430static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1431
40471509
PM
1432 // The number of task quiescent-state detections that rely on
1433// heavyweight readers executing explicit memory barriers.
6731da9e
PM
1434static unsigned long n_heavy_reader_attempts;
1435static unsigned long n_heavy_reader_updates;
1436static unsigned long n_heavy_reader_ofl_updates;
ffcc21a3 1437static unsigned long n_trc_holdouts;
40471509 1438
b0afa0f0
PM
1439void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1440DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1441 "RCU Tasks Trace");
1442
3847b645
PM
1443/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1444static u8 rcu_ld_need_qs(struct task_struct *t)
1445{
1446 smp_mb(); // Enforce full grace-period ordering.
1447 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1448}
1449
1450/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1451static void rcu_st_need_qs(struct task_struct *t, u8 v)
1452{
1453 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1454 smp_mb(); // Enforce full grace-period ordering.
1455}
1456
1457/*
1458 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1459 * the four-byte operand-size restriction of some platforms.
1460 * Returns the old value, which is often ignored.
1461 */
1462u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1463{
1464 union rcu_special ret;
1465 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1466 union rcu_special trs_new = trs_old;
1467
1468 if (trs_old.b.need_qs != old)
1469 return trs_old.b.need_qs;
1470 trs_new.b.need_qs = new;
1471 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1472 return ret.b.need_qs;
1473}
1474EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1475
eea3423b
PM
1476/*
1477 * If we are the last reader, signal the grace-period kthread.
1478 * Also remove from the per-CPU list of blocked tasks.
1479 */
a5c071cc 1480void rcu_read_unlock_trace_special(struct task_struct *t)
d5f177d3 1481{
0bcb3868
PM
1482 unsigned long flags;
1483 struct rcu_tasks_percpu *rtpcp;
1484 union rcu_special trs;
1485
1486 // Open-coded full-word version of rcu_ld_need_qs().
1487 smp_mb(); // Enforce full grace-period ordering.
1488 trs = smp_load_acquire(&t->trc_reader_special);
276c4104 1489
3847b645 1490 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
276c4104
PM
1491 smp_mb(); // Pairs with update-side barriers.
1492 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
0bcb3868 1493 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
3847b645
PM
1494 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1495 TRC_NEED_QS_CHECKED);
1496
0bcb3868
PM
1497 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1498 }
1499 if (trs.b.blocked) {
1500 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1501 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1502 list_del_init(&t->trc_blkd_node);
1503 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1504 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
3847b645 1505 }
a5c071cc 1506 WRITE_ONCE(t->trc_reader_nesting, 0);
d5f177d3
PM
1507}
1508EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1509
0356d4e6
PM
1510/* Add a newly blocked reader task to its CPU's list. */
1511void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1512{
1513 unsigned long flags;
1514 struct rcu_tasks_percpu *rtpcp;
1515
1516 local_irq_save(flags);
1517 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1518 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1519 t->trc_blkd_cpu = smp_processor_id();
1520 if (!rtpcp->rtp_blkd_tasks.next)
1521 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1522 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
0bcb3868 1523 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
0356d4e6
PM
1524 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1525}
1526EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1527
d5f177d3
PM
1528/* Add a task to the holdout list, if it is not already on the list. */
1529static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1530{
1531 if (list_empty(&t->trc_holdout_list)) {
1532 get_task_struct(t);
1533 list_add(&t->trc_holdout_list, bhp);
ffcc21a3 1534 n_trc_holdouts++;
d5f177d3
PM
1535 }
1536}
1537
1538/* Remove a task from the holdout list, if it is in fact present. */
1539static void trc_del_holdout(struct task_struct *t)
1540{
1541 if (!list_empty(&t->trc_holdout_list)) {
1542 list_del_init(&t->trc_holdout_list);
1543 put_task_struct(t);
ffcc21a3 1544 n_trc_holdouts--;
d5f177d3
PM
1545 }
1546}
1547
1548/* IPI handler to check task state. */
1549static void trc_read_check_handler(void *t_in)
1550{
9ff86b4c 1551 int nesting;
d5f177d3
PM
1552 struct task_struct *t = current;
1553 struct task_struct *texp = t_in;
1554
1555 // If the task is no longer running on this CPU, leave.
3847b645 1556 if (unlikely(texp != t))
d5f177d3 1557 goto reset_ipi; // Already on holdout list, so will check later.
d5f177d3
PM
1558
1559 // If the task is not in a read-side critical section, and
1560 // if this is the last reader, awaken the grace-period kthread.
9ff86b4c
PM
1561 nesting = READ_ONCE(t->trc_reader_nesting);
1562 if (likely(!nesting)) {
3847b645 1563 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
d5f177d3
PM
1564 goto reset_ipi;
1565 }
ba3a86e4 1566 // If we are racing with an rcu_read_unlock_trace(), try again later.
9ff86b4c 1567 if (unlikely(nesting < 0))
ba3a86e4 1568 goto reset_ipi;
d5f177d3 1569
eea3423b
PM
1570 // Get here if the task is in a read-side critical section.
1571 // Set its state so that it will update state for the grace-period
1572 // kthread upon exit from that critical section.
55061126 1573 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
d5f177d3
PM
1574
1575reset_ipi:
1576 // Allow future IPIs to be sent on CPU and for task.
1577 // Also order this IPI handler against any later manipulations of
1578 // the intended task.
8211e922 1579 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
d5f177d3
PM
1580 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1581}
1582
1583/* Callback function for scheduler to check locked-down task. */
3847b645 1584static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
d5f177d3 1585{
3847b645 1586 struct list_head *bhp = bhp_in;
7d0c9c50 1587 int cpu = task_cpu(t);
18f08e75 1588 int nesting;
7e3b70e0 1589 bool ofl = cpu_is_offline(cpu);
7d0c9c50 1590
897ba84d 1591 if (task_curr(t) && !ofl) {
7d0c9c50 1592 // If no chance of heavyweight readers, do it the hard way.
897ba84d 1593 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
9b3c4ab3 1594 return -EINVAL;
7d0c9c50
PM
1595
1596 // If heavyweight readers are enabled on the remote task,
1597 // we can inspect its state even though it is currently running.
1598 // However, we cannot safely change its state.
40471509 1599 n_heavy_reader_attempts++;
897ba84d
PM
1600 // Check for "running" idle tasks on offline CPUs.
1601 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
9b3c4ab3 1602 return -EINVAL; // No quiescent state, do it the hard way.
40471509 1603 n_heavy_reader_updates++;
18f08e75 1604 nesting = 0;
7d0c9c50 1605 } else {
bdb0cca0 1606 // The task is not running, so C-language access is safe.
18f08e75 1607 nesting = t->trc_reader_nesting;
a80712b9 1608 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
897ba84d
PM
1609 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1610 n_heavy_reader_ofl_updates++;
7d0c9c50 1611 }
d5f177d3 1612
18f08e75
PM
1613 // If not exiting a read-side critical section, mark as checked
1614 // so that the grace-period kthread will remove it from the
1615 // holdout list.
0968e892
PM
1616 if (!nesting) {
1617 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1618 return 0; // In QS, so done.
3847b645 1619 }
0968e892 1620 if (nesting < 0)
eea3423b 1621 return -EINVAL; // Reader transitioning, try again later.
7d0c9c50
PM
1622
1623 // The task is in a read-side critical section, so set up its
0968e892
PM
1624 // state so that it will update state upon exit from that critical
1625 // section.
55061126 1626 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
3847b645 1627 trc_add_holdout(t, bhp);
9b3c4ab3 1628 return 0;
d5f177d3
PM
1629}
1630
1631/* Attempt to extract the state for the specified task. */
1632static void trc_wait_for_one_reader(struct task_struct *t,
1633 struct list_head *bhp)
1634{
1635 int cpu;
1636
1637 // If a previous IPI is still in flight, let it complete.
1638 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1639 return;
1640
1641 // The current task had better be in a quiescent state.
1642 if (t == current) {
3847b645 1643 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
bdb0cca0 1644 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
d5f177d3
PM
1645 return;
1646 }
1647
1648 // Attempt to nail down the task for inspection.
1649 get_task_struct(t);
3847b645 1650 if (!task_call_func(t, trc_inspect_reader, bhp)) {
d5f177d3
PM
1651 put_task_struct(t);
1652 return;
1653 }
1654 put_task_struct(t);
1655
45f4b4a2
PM
1656 // If this task is not yet on the holdout list, then we are in
1657 // an RCU read-side critical section. Otherwise, the invocation of
d0a85858 1658 // trc_add_holdout() that added it to the list did the necessary
45f4b4a2
PM
1659 // get_task_struct(). Either way, the task cannot be freed out
1660 // from under this code.
1661
d5f177d3
PM
1662 // If currently running, send an IPI. Either way, add it to the list.
1663 trc_add_holdout(t, bhp);
574de876
PM
1664 if (task_curr(t) &&
1665 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
d5f177d3
PM
1666 // The task is currently running, so try IPIing it.
1667 cpu = task_cpu(t);
1668
1669 // If there is already an IPI outstanding, let it happen.
1670 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1671 return;
1672
d5f177d3
PM
1673 per_cpu(trc_ipi_to_cpu, cpu) = true;
1674 t->trc_ipi_to_cpu = cpu;
238dbce3 1675 rcu_tasks_trace.n_ipis++;
96017bf9 1676 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
d5f177d3
PM
1677 // Just in case there is some other reason for
1678 // failure than the target CPU being offline.
46aa886c
NU
1679 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1680 __func__, cpu);
7e0669c3 1681 rcu_tasks_trace.n_ipis_fails++;
d5f177d3 1682 per_cpu(trc_ipi_to_cpu, cpu) = false;
46aa886c 1683 t->trc_ipi_to_cpu = -1;
d5f177d3
PM
1684 }
1685 }
1686}
1687
7460ade1
PM
1688/*
1689 * Initialize for first-round processing for the specified task.
1690 * Return false if the task is NULL or already taken care of, true otherwise.
1691 */
1692static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
d5f177d3 1693{
1b04fa99 1694 // During early boot when there is only the one boot CPU, there
19415004
PM
1695 // is no idle task for the other CPUs. Also, the grace-period
1696 // kthread is always in a quiescent state. In addition, just return
1697 // if this task is already on the list.
7460ade1
PM
1698 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1699 return false;
1b04fa99 1700
3847b645 1701 rcu_st_need_qs(t, 0);
d5f177d3 1702 t->trc_ipi_to_cpu = -1;
7460ade1
PM
1703 return true;
1704}
1705
1706/* Do first-round processing for the specified task. */
1707static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1708{
1709 if (rcu_tasks_trace_pertask_prep(t, true))
1710 trc_wait_for_one_reader(t, hop);
1711}
1712
1fa98e2e 1713/* Initialize for a new RCU-tasks-trace grace period. */
7460ade1 1714static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1fa98e2e 1715{
dc7d54b4 1716 LIST_HEAD(blkd_tasks);
1fa98e2e 1717 int cpu;
dc7d54b4
PM
1718 unsigned long flags;
1719 struct rcu_tasks_percpu *rtpcp;
1720 struct task_struct *t;
1fa98e2e
PM
1721
1722 // There shouldn't be any old IPIs, but...
1723 for_each_possible_cpu(cpu)
1724 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1725
eea3423b
PM
1726 // Disable CPU hotplug across the CPU scan for the benefit of
1727 // any IPIs that might be needed. This also waits for all readers
1728 // in CPU-hotplug code paths.
1fa98e2e 1729 cpus_read_lock();
7460ade1 1730
eea3423b 1731 // These rcu_tasks_trace_pertask_prep() calls are serialized to
7460ade1 1732 // allow safe access to the hop list.
e386b672
PM
1733 for_each_online_cpu(cpu) {
1734 rcu_read_lock();
1735 t = cpu_curr_snapshot(cpu);
1736 if (rcu_tasks_trace_pertask_prep(t, true))
1737 trc_add_holdout(t, hop);
1738 rcu_read_unlock();
d6ad6063 1739 cond_resched_tasks_rcu_qs();
e386b672 1740 }
dc7d54b4
PM
1741
1742 // Only after all running tasks have been accounted for is it
1743 // safe to take care of the tasks that have blocked within their
1744 // current RCU tasks trace read-side critical section.
1745 for_each_possible_cpu(cpu) {
1746 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1747 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1748 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1749 while (!list_empty(&blkd_tasks)) {
1750 rcu_read_lock();
1751 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1752 list_del_init(&t->trc_blkd_node);
1753 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1754 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1755 rcu_tasks_trace_pertask(t, hop);
1756 rcu_read_unlock();
1757 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1758 }
1759 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
d6ad6063 1760 cond_resched_tasks_rcu_qs();
dc7d54b4 1761 }
56096ecd
PM
1762
1763 // Re-enable CPU hotplug now that the holdout list is populated.
1764 cpus_read_unlock();
1fa98e2e
PM
1765}
1766
9796e1ae 1767/*
955a0192 1768 * Do intermediate processing between task and holdout scans.
9796e1ae
PM
1769 */
1770static void rcu_tasks_trace_postscan(struct list_head *hop)
d5f177d3
PM
1771{
1772 // Wait for late-stage exiting tasks to finish exiting.
1773 // These might have passed the call to exit_tasks_rcu_finish().
e6c86c51
PM
1774
1775 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
d5f177d3 1776 synchronize_rcu();
3847b645
PM
1777 // Any tasks that exit after this point will set
1778 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
d5f177d3
PM
1779}
1780
65b629e7
NU
1781/* Communicate task state back to the RCU tasks trace stall warning request. */
1782struct trc_stall_chk_rdr {
1783 int nesting;
1784 int ipi_to_cpu;
1785 u8 needqs;
1786};
1787
1788static int trc_check_slow_task(struct task_struct *t, void *arg)
1789{
1790 struct trc_stall_chk_rdr *trc_rdrp = arg;
1791
f90f19da 1792 if (task_curr(t) && cpu_online(task_cpu(t)))
65b629e7
NU
1793 return false; // It is running, so decline to inspect it.
1794 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1795 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
3847b645 1796 trc_rdrp->needqs = rcu_ld_need_qs(t);
65b629e7
NU
1797 return true;
1798}
1799
4593e772
PM
1800/* Show the state of a task stalling the current RCU tasks trace GP. */
1801static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1802{
1803 int cpu;
65b629e7
NU
1804 struct trc_stall_chk_rdr trc_rdr;
1805 bool is_idle_tsk = is_idle_task(t);
4593e772
PM
1806
1807 if (*firstreport) {
1808 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1809 *firstreport = false;
1810 }
4593e772 1811 cpu = task_cpu(t);
65b629e7 1812 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
9f3eb5fb 1813 pr_alert("P%d: %c%c\n",
65b629e7 1814 t->pid,
9f3eb5fb 1815 ".I"[t->trc_ipi_to_cpu >= 0],
65b629e7
NU
1816 ".i"[is_idle_tsk]);
1817 else
387c0ad7 1818 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
65b629e7
NU
1819 t->pid,
1820 ".I"[trc_rdr.ipi_to_cpu >= 0],
1821 ".i"[is_idle_tsk],
1822 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
387c0ad7 1823 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
65b629e7 1824 trc_rdr.nesting,
be15a164
PM
1825 " !CN"[trc_rdr.needqs & 0x3],
1826 " ?"[trc_rdr.needqs > 0x3],
c8c03ad9 1827 cpu, cpu_online(cpu) ? "" : "(offline)");
4593e772
PM
1828 sched_show_task(t);
1829}
1830
1831/* List stalled IPIs for RCU tasks trace. */
1832static void show_stalled_ipi_trace(void)
1833{
1834 int cpu;
1835
1836 for_each_possible_cpu(cpu)
1837 if (per_cpu(trc_ipi_to_cpu, cpu))
1838 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1839}
1840
d5f177d3
PM
1841/* Do one scan of the holdout list. */
1842static void check_all_holdout_tasks_trace(struct list_head *hop,
4593e772 1843 bool needreport, bool *firstreport)
d5f177d3
PM
1844{
1845 struct task_struct *g, *t;
1846
eea3423b 1847 // Disable CPU hotplug across the holdout list scan for IPIs.
81b4a7bc
PM
1848 cpus_read_lock();
1849
d5f177d3
PM
1850 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1851 // If safe and needed, try to check the current task.
1852 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
3847b645 1853 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
d5f177d3
PM
1854 trc_wait_for_one_reader(t, hop);
1855
1856 // If check succeeded, remove this task from the list.
f5dbc594 1857 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
3847b645 1858 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
d5f177d3 1859 trc_del_holdout(t);
4593e772
PM
1860 else if (needreport)
1861 show_stalled_task_trace(t, firstreport);
d6ad6063 1862 cond_resched_tasks_rcu_qs();
4593e772 1863 }
81b4a7bc
PM
1864
1865 // Re-enable CPU hotplug now that the holdout list scan has completed.
1866 cpus_read_unlock();
1867
4593e772 1868 if (needreport) {
89401176 1869 if (*firstreport)
4593e772
PM
1870 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1871 show_stalled_ipi_trace();
d5f177d3
PM
1872 }
1873}
1874
cbe0d8d9
PM
1875static void rcu_tasks_trace_empty_fn(void *unused)
1876{
1877}
1878
d5f177d3 1879/* Wait for grace period to complete and provide ordering. */
af051ca4 1880static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
d5f177d3 1881{
cbe0d8d9 1882 int cpu;
4593e772 1883
cbe0d8d9
PM
1884 // Wait for any lingering IPI handlers to complete. Note that
1885 // if a CPU has gone offline or transitioned to userspace in the
1886 // meantime, all IPI handlers should have been drained beforehand.
1887 // Yes, this assumes that CPUs process IPIs in order. If that ever
1888 // changes, there will need to be a recheck and/or timed wait.
1889 for_each_online_cpu(cpu)
f5dbc594 1890 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
cbe0d8d9
PM
1891 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1892
d5f177d3 1893 smp_mb(); // Caller's code must be ordered after wakeup.
43766c3e 1894 // Pairs with pretty much every ordering primitive.
d5f177d3
PM
1895}
1896
1897/* Report any needed quiescent state for this exiting task. */
25246fc8 1898static void exit_tasks_rcu_finish_trace(struct task_struct *t)
d5f177d3 1899{
0356d4e6
PM
1900 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1901
3847b645 1902 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
bdb0cca0 1903 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
0bcb3868 1904 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
a5c071cc 1905 rcu_read_unlock_trace_special(t);
3847b645
PM
1906 else
1907 WRITE_ONCE(t->trc_reader_nesting, 0);
d5f177d3
PM
1908}
1909
d5f177d3
PM
1910/**
1911 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1912 * @rhp: structure to be used for queueing the RCU updates.
1913 * @func: actual callback function to be invoked after the grace period
1914 *
ed42c380
NU
1915 * The callback function will be invoked some time after a trace rcu-tasks
1916 * grace period elapses, in other words after all currently executing
1917 * trace rcu-tasks read-side critical sections have completed. These
1918 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1919 * and rcu_read_unlock_trace().
d5f177d3
PM
1920 *
1921 * See the description of call_rcu() for more detailed information on
1922 * memory ordering guarantees.
1923 */
1924void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1925{
1926 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1927}
1928EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
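/*
 * Illustrative sketch only (not part of this file), building on the
 * hypothetical my_trace_hook reader sketched earlier: a non-blocking
 * update-side path unpublishes the hook and queues the free to run after
 * a Tasks Trace grace period.  Assumes <linux/slab.h> for kfree(); the
 * "true" lockdep condition assumes updates are otherwise serialized.
 */
#if 0
static void my_trace_hook_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_trace_hook, rh));
}

static void my_trace_hook_remove_async(void)
{
	struct my_trace_hook *hp;

	hp = rcu_replace_pointer(my_trace_hook_ptr, NULL, true);
	if (hp)
		call_rcu_tasks_trace(&hp->rh, my_trace_hook_free_cb);
}
#endif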
1929
1930/**
1931 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1932 *
1933 * Control will return to the caller some time after a trace rcu-tasks
c7dcf810 1934 * grace period has elapsed, in other words after all currently executing
ed42c380 1935 * trace rcu-tasks read-side critical sections have elapsed. These read-side
c7dcf810
PM
1936 * critical sections are delimited by calls to rcu_read_lock_trace()
1937 * and rcu_read_unlock_trace().
d5f177d3
PM
1938 *
1939 * This is a very specialized primitive, intended only for a few uses in
1940 * tracing and other situations requiring manipulation of function preambles
1941 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1942 * (yet) intended for heavy use from multiple CPUs.
1943 *
1944 * See the description of synchronize_rcu() for more detailed information
1945 * on memory ordering guarantees.
1946 */
1947void synchronize_rcu_tasks_trace(void)
1948{
1949 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1950 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1951}
1952EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
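/*
 * Illustrative sketch only (not part of this file): the blocking counterpart
 * of the asynchronous removal sketched above.  After synchronize_rcu_tasks_trace()
 * returns, no task can still be inside an rcu_read_lock_trace() section that
 * saw the old hook, so it may be freed immediately.  Assumes <linux/slab.h>.
 */
#if 0
static void my_trace_hook_remove_sync(void)
{
	struct my_trace_hook *hp;

	hp = rcu_replace_pointer(my_trace_hook_ptr, NULL, true);
	synchronize_rcu_tasks_trace();	/* Wait for all pre-existing trace readers. */
	kfree(hp);
}
#endif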
1953
1954/**
1955 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1956 *
1957 * Although the current implementation is guaranteed to wait, it is not
1958 * obligated to do so, for example, when there are no pending callbacks.
1959 */
1960void rcu_barrier_tasks_trace(void)
1961{
ce9b1c66 1962 rcu_barrier_tasks_generic(&rcu_tasks_trace);
d5f177d3
PM
1963}
1964EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
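/*
 * Illustrative sketch only (not part of this file): a hypothetical teardown
 * path combines the asynchronous removal sketched above with
 * rcu_barrier_tasks_trace(), so that the queued callback has been invoked
 * before the code and data it references can go away.
 */
#if 0
static void my_trace_hook_teardown(void)
{
	my_trace_hook_remove_async();	/* Unpublish the hook and queue the free. */
	rcu_barrier_tasks_trace();	/* Wait for that queued callback to run. */
}
#endif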
1965
450d461a
PM
1966int rcu_tasks_trace_lazy_ms = -1;
1967module_param(rcu_tasks_trace_lazy_ms, int, 0444);
1968
d5f177d3
PM
1969static int __init rcu_spawn_tasks_trace_kthread(void)
1970{
2393a613 1971 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
4fe192df 1972 rcu_tasks_trace.gp_sleep = HZ / 10;
75dc2da5 1973 rcu_tasks_trace.init_fract = HZ / 10;
2393a613 1974 } else {
4fe192df
PM
1975 rcu_tasks_trace.gp_sleep = HZ / 200;
1976 if (rcu_tasks_trace.gp_sleep <= 0)
1977 rcu_tasks_trace.gp_sleep = 1;
75dc2da5 1978 rcu_tasks_trace.init_fract = HZ / 200;
2393a613
PM
1979 if (rcu_tasks_trace.init_fract <= 0)
1980 rcu_tasks_trace.init_fract = 1;
1981 }
450d461a
PM
1982 if (rcu_tasks_trace_lazy_ms >= 0)
1983 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
d5f177d3 1984 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
d5f177d3
PM
1985 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1986 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1987 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1988 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1989 return 0;
1990}
d5f177d3 1991
27c0f144
PM
1992#if !defined(CONFIG_TINY_RCU)
1993void show_rcu_tasks_trace_gp_kthread(void)
e21408ce 1994{
40471509 1995 char buf[64];
e21408ce 1996
ffcc21a3
PM
1997 sprintf(buf, "N%lu h:%lu/%lu/%lu",
1998 data_race(n_trc_holdouts),
edf3775f 1999 data_race(n_heavy_reader_ofl_updates),
40471509
PM
2000 data_race(n_heavy_reader_updates),
2001 data_race(n_heavy_reader_attempts));
e21408ce
PM
2002 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2003}
27c0f144
PM
2004EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
2005#endif // !defined(CONFIG_TINY_RCU)
e21408ce 2006
5f8e3202
PM
2007struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
2008{
2009 return rcu_tasks_trace.kthread_ptr;
2010}
2011EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
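/*
 * Illustrative sketch only (not part of this file): a hypothetical debug
 * path, such as an rcutorture-style grace-period-kthread dump, might combine
 * the accessor above with sched_show_task() to report the kthread's state.
 */
#if 0
static void my_show_trace_gp_kthread(void)
{
	struct task_struct *t = get_rcu_tasks_trace_gp_kthread();

	if (t)
		sched_show_task(t);
}
#endif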
2012
d5f177d3 2013#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
25246fc8 2014static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
d5f177d3 2015#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
8fd8ca38 2016
8344496e 2017#ifndef CONFIG_TINY_RCU
e21408ce
PM
2018void show_rcu_tasks_gp_kthreads(void)
2019{
2020 show_rcu_tasks_classic_gp_kthread();
2021 show_rcu_tasks_rude_gp_kthread();
2022 show_rcu_tasks_trace_gp_kthread();
2023}
8344496e 2024#endif /* #ifndef CONFIG_TINY_RCU */
e21408ce 2025
bfba7ed0
URS
2026#ifdef CONFIG_PROVE_RCU
2027struct rcu_tasks_test_desc {
2028 struct rcu_head rh;
2029 const char *name;
2030 bool notrun;
1cf1144e 2031 unsigned long runstart;
bfba7ed0
URS
2032};
2033
2034static struct rcu_tasks_test_desc tests[] = {
2035 {
2036 .name = "call_rcu_tasks()",
2037 /* If not defined, the test is skipped. */
1cf1144e 2038 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
bfba7ed0
URS
2039 },
2040 {
2041 .name = "call_rcu_tasks_rude()",
2042 /* If not defined, the test is skipped. */
1cf1144e 2043 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
bfba7ed0
URS
2044 },
2045 {
2046 .name = "call_rcu_tasks_trace()",
2047 /* If not defined, the test is skipped. */
1cf1144e 2048 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
bfba7ed0
URS
2049 }
2050};
2051
2052static void test_rcu_tasks_callback(struct rcu_head *rhp)
2053{
2054 struct rcu_tasks_test_desc *rttd =
2055 container_of(rhp, struct rcu_tasks_test_desc, rh);
2056
2057 pr_info("Callback from %s invoked.\n", rttd->name);
2058
1cf1144e 2059 rttd->notrun = false;
bfba7ed0
URS
2060}
2061
2062static void rcu_tasks_initiate_self_tests(void)
2063{
bfba7ed0 2064#ifdef CONFIG_TASKS_RCU
92a708dc 2065 pr_info("Running RCU Tasks wait API self tests\n");
9420fb93 2066 tests[0].runstart = jiffies;
bfba7ed0
URS
2067 synchronize_rcu_tasks();
2068 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2069#endif
2070
2071#ifdef CONFIG_TASKS_RUDE_RCU
92a708dc 2072 pr_info("Running RCU Tasks Rude wait API self tests\n");
9420fb93 2073 tests[1].runstart = jiffies;
bfba7ed0
URS
2074 synchronize_rcu_tasks_rude();
2075 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2076#endif
2077
2078#ifdef CONFIG_TASKS_TRACE_RCU
92a708dc 2079 pr_info("Running RCU Tasks Trace wait API self tests\n");
9420fb93 2080 tests[2].runstart = jiffies;
bfba7ed0
URS
2081 synchronize_rcu_tasks_trace();
2082 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2083#endif
2084}
2085
e72ee5e1
WL
2086/*
2087 * Return: 0 - test passed
2088 * 1 - test failed, but has not timed out yet
2089 * -1 - test failed and timed out
2090 */
bfba7ed0
URS
2091static int rcu_tasks_verify_self_tests(void)
2092{
2093 int ret = 0;
2094 int i;
1cf1144e 2095 unsigned long bst = rcu_task_stall_timeout;
bfba7ed0 2096
1cf1144e
PM
2097 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2098 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
bfba7ed0 2099 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1cf1144e
PM
2100 while (tests[i].notrun) { // still hanging.
2101 if (time_after(jiffies, tests[i].runstart + bst)) {
2102 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2103 ret = -1;
2104 break;
2105 }
e72ee5e1
WL
2106 ret = 1;
2107 break;
bfba7ed0
URS
2108 }
2109 }
e72ee5e1 2110 WARN_ON(ret < 0);
bfba7ed0
URS
2111
2112 return ret;
2113}
e72ee5e1
WL
2114
2115/*
2116 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2117 * test passes or has timed out.
2118 */
2119static struct delayed_work rcu_tasks_verify_work;
2120static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2121{
2122 int ret = rcu_tasks_verify_self_tests();
2123
2124 if (ret <= 0)
2125 return;
2126
2127 /* Test fails but not timed out yet, reschedule another check */
2128 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2129}
2130
2131static int rcu_tasks_verify_schedule_work(void)
2132{
2133 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2134 rcu_tasks_verify_work_fn(NULL);
2135 return 0;
2136}
2137late_initcall(rcu_tasks_verify_schedule_work);
bfba7ed0
URS
2138#else /* #ifdef CONFIG_PROVE_RCU */
2139static void rcu_tasks_initiate_self_tests(void) { }
2140#endif /* #else #ifdef CONFIG_PROVE_RCU */
2141
30ef0963
PM
2142void __init tasks_cblist_init_generic(void)
2143{
2144 lockdep_assert_irqs_disabled();
2145 WARN_ON(num_online_cpus() > 1);
2146
2147#ifdef CONFIG_TASKS_RCU
2148 cblist_init_generic(&rcu_tasks);
2149#endif
2150
2151#ifdef CONFIG_TASKS_RUDE_RCU
2152 cblist_init_generic(&rcu_tasks_rude);
2153#endif
2154
2155#ifdef CONFIG_TASKS_TRACE_RCU
2156 cblist_init_generic(&rcu_tasks_trace);
2157#endif
2158}
2159
1b04fa99
URS
2160void __init rcu_init_tasks_generic(void)
2161{
2162#ifdef CONFIG_TASKS_RCU
2163 rcu_spawn_tasks_kthread();
2164#endif
2165
2166#ifdef CONFIG_TASKS_RUDE_RCU
2167 rcu_spawn_tasks_rude_kthread();
2168#endif
2169
2170#ifdef CONFIG_TASKS_TRACE_RCU
2171 rcu_spawn_tasks_trace_kthread();
2172#endif
bfba7ed0
URS
2173
2174 // Run the self-tests.
2175 rcu_tasks_initiate_self_tests();
1b04fa99
URS
2176}
2177
8fd8ca38
PM
2178#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2179static inline void rcu_tasks_bootup_oddness(void) {}
2180#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */