1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
5 * Copyright (C) 2020 Paul E. McKenney
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
11 ////////////////////////////////////////////////////////////////////////
13 // Generic data structures.
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(struct list_head *hop);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @lazy_timer: Timer to unlazify callbacks.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
31 * @rtp_work: Work queue for invoking callbacks.
32 * @rtp_irq_work: IRQ work queue for deferred wakeups.
33 * @barrier_q_head: RCU callback for barrier operation.
34 * @rtp_blkd_tasks: List of tasks blocked as readers.
35 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
36 * @cpu: CPU number corresponding to this entry.
37 * @rtpp: Pointer to the rcu_tasks structure.
39 struct rcu_tasks_percpu {
40 struct rcu_segcblist cblist;
41 raw_spinlock_t __private lock;
42 unsigned long rtp_jiffies;
43 unsigned long rtp_n_lock_retries;
44 struct timer_list lazy_timer;
45 unsigned int urgent_gp;
46 struct work_struct rtp_work;
47 struct irq_work rtp_irq_work;
48 struct rcu_head barrier_q_head;
49 struct list_head rtp_blkd_tasks;
50 struct list_head rtp_exit_list;
52 struct rcu_tasks *rtpp;
56 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
57 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
58 * @cbs_gbl_lock: Lock protecting callback list.
59 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
60 * @gp_func: This flavor's grace-period-wait function.
61 * @gp_state: Grace period's most recent state transition (debugging).
62 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
63 * @init_fract: Initial backoff sleep interval.
64 * @gp_jiffies: Time of last @gp_state transition.
65 * @gp_start: Most recent grace-period start in jiffies.
66 * @tasks_gp_seq: Number of grace periods completed since boot.
67 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
68 * @n_ipis_fails: Number of IPI-send failures.
69 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
70 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
71 * @pregp_func: This flavor's pre-grace-period function (optional).
72 * @pertask_func: This flavor's per-task scan function (optional).
73 * @postscan_func: This flavor's post-task scan function (optional).
74 * @holdouts_func: This flavor's holdout-list scan function (optional).
75 * @postgp_func: This flavor's post-grace-period function (optional).
76 * @call_func: This flavor's call_rcu()-equivalent function.
77 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
78 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
79 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
80 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
81 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
82 * @barrier_q_mutex: Serialize barrier operations.
83 * @barrier_q_count: Number of queues being waited on.
84 * @barrier_q_completion: Barrier wait/wakeup mechanism.
85 * @barrier_q_seq: Sequence number for barrier operations.
86 * @name: This flavor's textual name.
87 * @kname: This flavor's kthread name.
90 struct rcuwait cbs_wait;
91 raw_spinlock_t cbs_gbl_lock;
92 struct mutex tasks_gp_mutex;
96 unsigned long gp_jiffies;
97 unsigned long gp_start;
98 unsigned long tasks_gp_seq;
100 unsigned long n_ipis_fails;
101 struct task_struct *kthread_ptr;
102 unsigned long lazy_jiffies;
103 rcu_tasks_gp_func_t gp_func;
104 pregp_func_t pregp_func;
105 pertask_func_t pertask_func;
106 postscan_func_t postscan_func;
107 holdouts_func_t holdouts_func;
108 postgp_func_t postgp_func;
109 call_rcu_func_t call_func;
110 struct rcu_tasks_percpu __percpu *rtpcpu;
111 int percpu_enqueue_shift;
112 int percpu_enqueue_lim;
113 int percpu_dequeue_lim;
114 unsigned long percpu_dequeue_gpseq;
115 struct mutex barrier_q_mutex;
116 atomic_t barrier_q_count;
117 struct completion barrier_q_completion;
118 unsigned long barrier_q_seq;
123 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
125 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
126 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
127 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
128 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
130 static struct rcu_tasks rt_name = \
132 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
133 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
134 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
137 .rtpcpu = &rt_name ## __percpu, \
138 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
140 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
141 .percpu_enqueue_lim = 1, \
142 .percpu_dequeue_lim = 1, \
143 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
144 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
148 #ifdef CONFIG_TASKS_RCU
150 /* Report delay in rcu_tasks_postscan()'s handling of exiting tasks. */
151 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
152 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
155 /* Avoid IPIing CPUs early in the grace period. */
156 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
157 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
158 module_param(rcu_task_ipi_delay, int, 0644);
160 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
161 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
162 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
163 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
164 module_param(rcu_task_stall_timeout, int, 0644);
165 #define RCU_TASK_STALL_INFO (HZ * 10)
166 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
167 module_param(rcu_task_stall_info, int, 0644);
168 static int rcu_task_stall_info_mult __read_mostly = 3;
169 module_param(rcu_task_stall_info_mult, int, 0444);
171 static int rcu_task_enqueue_lim __read_mostly = -1;
172 module_param(rcu_task_enqueue_lim, int, 0444);
174 static bool rcu_task_cb_adjust;
175 static int rcu_task_contend_lim __read_mostly = 100;
176 module_param(rcu_task_contend_lim, int, 0444);
177 static int rcu_task_collapse_lim __read_mostly = 10;
178 module_param(rcu_task_collapse_lim, int, 0444);
179 static int rcu_task_lazy_lim __read_mostly = 32;
180 module_param(rcu_task_lazy_lim, int, 0444);
182 /* RCU tasks grace-period state for debugging. */
184 #define RTGS_WAIT_WAIT_CBS 1
185 #define RTGS_WAIT_GP 2
186 #define RTGS_PRE_WAIT_GP 3
187 #define RTGS_SCAN_TASKLIST 4
188 #define RTGS_POST_SCAN_TASKLIST 5
189 #define RTGS_WAIT_SCAN_HOLDOUTS 6
190 #define RTGS_SCAN_HOLDOUTS 7
191 #define RTGS_POST_GP 8
192 #define RTGS_WAIT_READERS 9
193 #define RTGS_INVOKE_CBS 10
194 #define RTGS_WAIT_CBS 11
195 #ifndef CONFIG_TINY_RCU
196 static const char * const rcu_tasks_gp_state_names[] = {
198 "RTGS_WAIT_WAIT_CBS",
201 "RTGS_SCAN_TASKLIST",
202 "RTGS_POST_SCAN_TASKLIST",
203 "RTGS_WAIT_SCAN_HOLDOUTS",
204 "RTGS_SCAN_HOLDOUTS",
210 #endif /* #ifndef CONFIG_TINY_RCU */
212 ////////////////////////////////////////////////////////////////////////
216 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
218 /* Record grace-period phase and time. */
219 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
221 rtp->gp_state = newstate;
222 rtp->gp_jiffies = jiffies;
225 #ifndef CONFIG_TINY_RCU
226 /* Return state name. */
227 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
229 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
230 int j = READ_ONCE(i); // Prevent the compiler from reading twice
232 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
234 return rcu_tasks_gp_state_names[j];
236 #endif /* #ifndef CONFIG_TINY_RCU */
238 // Initialize per-CPU callback lists for the specified flavor of
239 // Tasks RCU. Do not enqueue callbacks before this function is invoked.
240 static void cblist_init_generic(struct rcu_tasks *rtp)
246 if (rcu_task_enqueue_lim < 0) {
247 rcu_task_enqueue_lim = 1;
248 rcu_task_cb_adjust = true;
249 } else if (rcu_task_enqueue_lim == 0) {
250 rcu_task_enqueue_lim = 1;
252 lim = rcu_task_enqueue_lim;
254 if (lim > nr_cpu_ids)
256 shift = ilog2(nr_cpu_ids / lim);
257 if (((nr_cpu_ids - 1) >> shift) >= lim)
259 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
260 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
261 smp_store_release(&rtp->percpu_enqueue_lim, lim);
262 for_each_possible_cpu(cpu) {
263 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
265 WARN_ON_ONCE(!rtpcp);
267 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
268 if (rcu_segcblist_empty(&rtpcp->cblist))
269 rcu_segcblist_init(&rtpcp->cblist);
270 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
273 if (!rtpcp->rtp_blkd_tasks.next)
274 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
275 if (!rtpcp->rtp_exit_list.next)
276 INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
279 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
280 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
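// Illustrative worked example, not part of the original source: assume
// nr_cpu_ids = 16, all sixteen CPUs possible, and a boot-time
// rcu_task_enqueue_lim of 4. The code above then computes
// shift = ilog2(16 / 4) = 2, and since (16 - 1) >> 2 = 3 < 4 no further
// adjustment is needed. When callbacks are later enqueued (see
// call_rcu_tasks_generic() below), a task running on CPU c targets the
// per-CPU list of CPU (c >> 2): CPUs 0-3 share CPU 0's list, CPUs 4-7
// share CPU 1's list, and so on, staying within the four-list limit.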
283 // Compute wakeup time for lazy callback timer.
284 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
286 return jiffies + rtp->lazy_jiffies;
289 // Timer handler that unlazifies lazy callbacks.
290 static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
293 bool needwake = false;
294 struct rcu_tasks *rtp;
295 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
298 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
299 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
300 if (!rtpcp->urgent_gp)
301 rtpcp->urgent_gp = 1;
303 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
305 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
307 rcuwait_wake_up(&rtp->cbs_wait);
310 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
311 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
313 struct rcu_tasks *rtp;
314 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
317 rcuwait_wake_up(&rtp->cbs_wait);
320 // Enqueue a callback for the specified flavor of Tasks RCU.
321 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
322 struct rcu_tasks *rtp)
326 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
329 bool needadjust = false;
331 struct rcu_tasks_percpu *rtpcp;
335 local_irq_save(flags);
337 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
338 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
339 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
340 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
341 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
343 if (rtpcp->rtp_jiffies != j) {
344 rtpcp->rtp_jiffies = j;
345 rtpcp->rtp_n_lock_retries = 0;
347 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
348 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
349 needadjust = true; // Defer adjustment to avoid deadlock.
351 // Queuing callbacks before initialization not yet supported.
352 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
353 rcu_segcblist_init(&rtpcp->cblist);
354 needwake = (func == wakeme_after_rcu) ||
355 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
356 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
357 if (rtp->lazy_jiffies)
358 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
360 needwake = rcu_segcblist_empty(&rtpcp->cblist);
363 rtpcp->urgent_gp = 3;
364 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
365 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
366 if (unlikely(needadjust)) {
367 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
368 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
369 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
370 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
371 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
372 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
374 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
377 /* We can't create the thread unless interrupts are enabled. */
378 if (needwake && READ_ONCE(rtp->kthread_ptr))
379 irq_work_queue(&rtpcp->rtp_irq_work);
382 // RCU callback function for rcu_barrier_tasks_generic().
383 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
385 struct rcu_tasks *rtp;
386 struct rcu_tasks_percpu *rtpcp;
388 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
390 if (atomic_dec_and_test(&rtp->barrier_q_count))
391 complete(&rtp->barrier_q_completion);
394 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
395 // Operates in a manner similar to rcu_barrier().
396 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
400 struct rcu_tasks_percpu *rtpcp;
401 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
403 mutex_lock(&rtp->barrier_q_mutex);
404 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
406 mutex_unlock(&rtp->barrier_q_mutex);
409 rcu_seq_start(&rtp->barrier_q_seq);
410 init_completion(&rtp->barrier_q_completion);
411 atomic_set(&rtp->barrier_q_count, 2);
412 for_each_possible_cpu(cpu) {
413 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
415 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
416 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
417 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
418 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
419 atomic_inc(&rtp->barrier_q_count);
420 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
422 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
423 complete(&rtp->barrier_q_completion);
424 wait_for_completion(&rtp->barrier_q_completion);
425 rcu_seq_end(&rtp->barrier_q_seq);
426 mutex_unlock(&rtp->barrier_q_mutex);
429 // Advance callbacks and indicate whether either a grace period or
430 // callback invocation is needed.
431 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
436 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
442 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
443 for (cpu = 0; cpu < dequeue_limit; cpu++) {
444 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
446 /* Advance and accelerate any new callbacks. */
447 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
449 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
450 // Should we shrink down to a single callback queue?
451 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
457 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
458 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
459 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
460 if (rtp->lazy_jiffies)
463 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
464 rtpcp->urgent_gp = 0;
466 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
468 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
471 // Shrink down to a single callback queue if appropriate.
472 // This is done in two stages: (1) If there are no more than
473 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
474 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
475 // if there has not been an increase in callbacks, limit dequeuing
476 // to CPU 0. Note the matching RCU read-side critical section in
477 // call_rcu_tasks_generic().
478 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
479 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
480 if (rtp->percpu_enqueue_lim > 1) {
481 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
482 smp_store_release(&rtp->percpu_enqueue_lim, 1);
483 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
485 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
487 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
489 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
490 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
491 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
492 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
493 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
495 if (rtp->percpu_dequeue_lim == 1) {
496 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
497 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
499 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
502 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
508 // Advance callbacks and invoke any that are ready.
509 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
516 struct rcu_head *rhp;
517 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
518 struct rcu_tasks_percpu *rtpcp_next;
521 cpunext = cpu * 2 + 1;
522 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
523 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
524 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
525 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
527 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
528 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
529 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
530 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
534 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
536 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
537 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
538 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
539 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
541 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
542 debug_rcu_head_callback(rhp);
548 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
549 rcu_segcblist_add_len(&rtpcp->cblist, -len);
550 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
551 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
554 // Workqueue flood to advance callbacks and invoke any that are ready.
555 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
557 struct rcu_tasks *rtp;
558 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
561 rcu_tasks_invoke_cbs(rtp, rtpcp);
564 // Wait for one grace period.
565 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
569 mutex_lock(&rtp->tasks_gp_mutex);
571 // If there were none, wait a bit and start over.
572 if (unlikely(midboot)) {
575 mutex_unlock(&rtp->tasks_gp_mutex);
576 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
577 rcuwait_wait_event(&rtp->cbs_wait,
578 (needgpcb = rcu_tasks_need_gpcb(rtp)),
580 mutex_lock(&rtp->tasks_gp_mutex);
583 if (needgpcb & 0x2) {
584 // Wait for one grace period.
585 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
586 rtp->gp_start = jiffies;
587 rcu_seq_start(&rtp->tasks_gp_seq);
589 rcu_seq_end(&rtp->tasks_gp_seq);
593 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
594 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
595 mutex_unlock(&rtp->tasks_gp_mutex);
598 // RCU-tasks kthread that detects grace periods and invokes callbacks.
599 static int __noreturn rcu_tasks_kthread(void *arg)
602 struct rcu_tasks *rtp = arg;
604 for_each_possible_cpu(cpu) {
605 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
607 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
608 rtpcp->urgent_gp = 1;
611 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
612 housekeeping_affine(current, HK_TYPE_RCU);
613 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
616 * Each pass through the following loop makes one check for
617 * newly arrived callbacks, and, if there are some, waits for
618 * one RCU-tasks grace period and then invokes the callbacks.
619 * This loop is terminated by the system going down. ;-)
622 // Wait for one grace period and invoke any callbacks that are ready.
624 rcu_tasks_one_gp(rtp, false);
626 // Paranoid sleep to keep this from entering a tight loop.
627 schedule_timeout_idle(rtp->gp_sleep);
631 // Wait for a grace period for the specified flavor of Tasks RCU.
632 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
634 /* Complain if the scheduler has not started. */
635 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
636 "synchronize_%s() called too soon", rtp->name))
639 // If the grace-period kthread is running, use it.
640 if (READ_ONCE(rtp->kthread_ptr)) {
641 wait_rcu_gp(rtp->call_func);
644 rcu_tasks_one_gp(rtp, true);
647 /* Spawn RCU-tasks grace-period kthread. */
648 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
650 struct task_struct *t;
652 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
653 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
655 smp_mb(); /* Ensure others see full kthread. */
658 #ifndef CONFIG_TINY_RCU
661 * Print any non-default Tasks RCU settings.
663 static void __init rcu_tasks_bootup_oddness(void)
665 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
668 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
669 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
670 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
671 if (rtsimc != rcu_task_stall_info_mult) {
672 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
673 rcu_task_stall_info_mult = rtsimc;
675 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
676 #ifdef CONFIG_TASKS_RCU
677 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
678 #endif /* #ifdef CONFIG_TASKS_RCU */
679 #ifdef CONFIG_TASKS_RUDE_RCU
680 pr_info("\tRude variant of Tasks RCU enabled.\n");
681 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
682 #ifdef CONFIG_TASKS_TRACE_RCU
683 pr_info("\tTracing variant of Tasks RCU enabled.\n");
684 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
687 #endif /* #ifndef CONFIG_TINY_RCU */
689 #ifndef CONFIG_TINY_RCU
690 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
691 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
694 bool havecbs = false;
695 bool haveurgent = false;
696 bool haveurgentcbs = false;
698 for_each_possible_cpu(cpu) {
699 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
701 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
703 if (data_race(rtpcp->urgent_gp))
705 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
706 haveurgentcbs = true;
707 if (havecbs && haveurgent && haveurgentcbs)
710 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
712 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
713 jiffies - data_race(rtp->gp_jiffies),
714 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
715 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
716 ".k"[!!data_race(rtp->kthread_ptr)],
723 #endif // #ifndef CONFIG_TINY_RCU
725 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
727 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
729 ////////////////////////////////////////////////////////////////////////
731 // Shared code between task-list-scanning variants of Tasks RCU.
733 /* Wait for one RCU-tasks grace period. */
734 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
736 struct task_struct *g;
740 unsigned long lastinfo;
741 unsigned long lastreport;
742 bool reported = false;
744 struct task_struct *t;
746 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
747 rtp->pregp_func(&holdouts);
750 * There were callbacks, so we need to wait for an RCU-tasks
751 * grace period. Start off by scanning the task list for tasks
752 * that are not already voluntarily blocked. Mark these tasks
753 * and make a list of them in holdouts.
755 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
756 if (rtp->pertask_func) {
758 for_each_process_thread(g, t)
759 rtp->pertask_func(t, &holdouts);
763 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
764 rtp->postscan_func(&holdouts);
767 * Each pass through the following loop scans the list of holdout
768 * tasks, removing any that are no longer holdouts. When the list
769 * is empty, we are done.
771 lastreport = jiffies;
772 lastinfo = lastreport;
773 rtsi = READ_ONCE(rcu_task_stall_info);
775 // Start off with initial wait and slowly back off to 1 HZ wait.
776 fract = rtp->init_fract;
778 while (!list_empty(&holdouts)) {
784 // Slowly back off waiting for holdouts
785 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
786 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
787 schedule_timeout_idle(fract);
789 exp = jiffies_to_nsecs(fract);
790 __set_current_state(TASK_IDLE);
791 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
797 rtst = READ_ONCE(rcu_task_stall_timeout);
798 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
800 lastreport = jiffies;
804 WARN_ON(signal_pending(current));
805 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
806 rtp->holdouts_func(&holdouts, needreport, &firstreport);
808 // Print pre-stall informational messages if needed.
810 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
812 rtsi = rtsi * rcu_task_stall_info_mult;
813 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
814 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
818 set_tasks_gp_state(rtp, RTGS_POST_GP);
819 rtp->postgp_func(rtp);
822 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
824 #ifdef CONFIG_TASKS_RCU
826 ////////////////////////////////////////////////////////////////////////
828 // Simple variant of RCU whose quiescent states are voluntary context
829 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
830 // As such, grace periods can take one good long time. There are no
831 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
832 // because this implementation is intended to get the system into a safe
833 // state for some of the manipulations involved in tracing and the like.
834 // Finally, this implementation does not support high call_rcu_tasks()
835 // rates from multiple CPUs. If this is required, per-CPU callback lists will be needed.
838 // The implementation uses rcu_tasks_wait_gp(), which relies on function
839 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
840 // function sets these function pointers up so that rcu_tasks_wait_gp()
841 // invokes these functions in this order:
843 // rcu_tasks_pregp_step():
844 // Invokes synchronize_rcu() in order to wait for all in-flight
845 // t->on_rq and t->nvcsw transitions to complete. This works because
846 // all such transitions are carried out with interrupts disabled.
847 // rcu_tasks_pertask(), invoked on every non-idle task:
848 // For every runnable non-idle task other than the current one, use
849 // get_task_struct() to pin down that task, snapshot that task's
850 // number of voluntary context switches, and add that task to the holdout list.
852 // rcu_tasks_postscan():
853 // Gather per-CPU lists of tasks in do_exit() to ensure that all
854 // tasks that were in the process of exiting (and which thus might
855 // not know to synchronize with this RCU Tasks grace period) have
856 // completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
857 // will take care of any tasks stuck in the non-preemptible region
858 // of do_exit() following its call to exit_tasks_rcu_stop().
859 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
860 // Scans the holdout list, attempting to identify a quiescent state
861 // for each task on the list. If there is a quiescent state, the
862 // corresponding task is removed from the holdout list.
863 // rcu_tasks_postgp():
864 // Invokes synchronize_rcu() in order to ensure that all prior
865 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
866 // to have happened before the end of this RCU Tasks grace period.
867 // Again, this works because all such transitions are carried out
868 // with interrupts disabled.
870 // For each exiting task, the exit_tasks_rcu_start() and
871 // exit_tasks_rcu_finish() functions add and remove, respectively, the
872 // current task to a per-CPU list of tasks that rcu_tasks_postscan() must
873 // wait on. This is necessary because rcu_tasks_postscan() must wait on
874 // tasks that have already been removed from the global list of tasks.
876 // Pre-grace-period update-side code is ordered before the grace
877 // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
878 // is ordered before the grace period via the synchronize_rcu() call in
879 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt disabling.
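// Illustrative sketch, not part of this file's implementation: a
// long-running kernel loop supplies the voluntary quiescent states that
// this flavor waits for by calling cond_resched_tasks_rcu_qs(). Only
// cond_resched_tasks_rcu_qs() is real here; example_scan_work() and its
// parameters are hypothetical.
static void __maybe_unused example_scan_work(unsigned long *table, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* ... process table[i] ... */

		// Report a Tasks-RCU quiescent state (and possibly
		// reschedule) so that in-flight call_rcu_tasks() grace
		// periods can complete even on PREEMPT_NONE kernels.
		cond_resched_tasks_rcu_qs();
	}
}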
882 /* Pre-grace-period preparation. */
883 static void rcu_tasks_pregp_step(struct list_head *hop)
886 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
887 * to complete. Invoking synchronize_rcu() suffices because all
888 * these transitions occur with interrupts disabled. Without this
889 * synchronize_rcu(), a read-side critical section that started
890 * before the grace period might be incorrectly seen as having
891 * started after the grace period.
893 * This synchronize_rcu() also dispenses with the need for a
894 * memory barrier on the first store to t->rcu_tasks_holdout,
895 * as it forces the store to happen after the beginning of the grace period.
901 /* Check for quiescent states since the pregp's synchronize_rcu() */
902 static bool rcu_tasks_is_holdout(struct task_struct *t)
906 /* Has the task been seen voluntarily sleeping? */
907 if (!READ_ONCE(t->on_rq))
911 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
912 * quiescent states. But CPU boot code performed by the idle task
913 * isn't a quiescent state.
920 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
921 if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
927 /* Per-task initial processing. */
928 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
930 if (t != current && rcu_tasks_is_holdout(t)) {
932 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
933 WRITE_ONCE(t->rcu_tasks_holdout, true);
934 list_add(&t->rcu_tasks_holdout_list, hop);
938 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
939 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
942 /* Processing between scanning the tasklist and draining the holdout list. */
942 static void rcu_tasks_postscan(struct list_head *hop)
945 int rtsi = READ_ONCE(rcu_task_stall_info);
947 if (!IS_ENABLED(CONFIG_TINY_RCU)) {
948 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
949 add_timer(&tasks_rcu_exit_srcu_stall_timer);
953 * Exiting tasks may escape the tasklist scan. Those are vulnerable
954 * until their final schedule() with TASK_DEAD state. To cope with
955 * this, divide the fragile part of the exit path into two intersecting
956 * read-side critical sections:
958 * 1) A task_struct list addition before calling exit_notify(),
959 * which may remove the task from the tasklist, with the
960 * removal after the final preempt_disable() call in do_exit().
962 * 2) An _RCU_ read side starting with the final preempt_disable()
963 * call in do_exit() and ending with the final call to schedule()
964 * with TASK_DEAD state.
966 * This handles part 1). The postgp step handles part 2) with a
967 * call to synchronize_rcu().
970 for_each_possible_cpu(cpu) {
971 unsigned long j = jiffies + 1;
972 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
973 struct task_struct *t;
974 struct task_struct *t1;
975 struct list_head tmp;
977 raw_spin_lock_irq_rcu_node(rtpcp);
978 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
979 if (list_empty(&t->rcu_tasks_holdout_list))
980 rcu_tasks_pertask(t, hop);
982 // RT kernels need frequent pauses, otherwise
983 // pause at least once per pair of jiffies.
984 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
987 // Keep our place in the list while pausing.
988 // Nothing else traverses this list, so adding a
989 // bare list_head is OK.
990 list_add(&tmp, &t->rcu_tasks_exit_list);
991 raw_spin_unlock_irq_rcu_node(rtpcp);
992 cond_resched(); // For CONFIG_PREEMPT=n kernels
993 raw_spin_lock_irq_rcu_node(rtpcp);
994 t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
998 raw_spin_unlock_irq_rcu_node(rtpcp);
1001 if (!IS_ENABLED(CONFIG_TINY_RCU))
1002 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
1005 /* See if tasks are still holding out, complain if so. */
1006 static void check_holdout_task(struct task_struct *t,
1007 bool needreport, bool *firstreport)
1011 if (!READ_ONCE(t->rcu_tasks_holdout) ||
1012 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1013 !rcu_tasks_is_holdout(t) ||
1014 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
1015 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
1016 WRITE_ONCE(t->rcu_tasks_holdout, false);
1017 list_del_init(&t->rcu_tasks_holdout_list);
1021 rcu_request_urgent_qs_task(t);
1025 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1026 *firstreport = false;
1029 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1030 t, ".I"[is_idle_task(t)],
1031 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1032 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1033 data_race(t->rcu_tasks_idle_cpu), cpu);
1037 /* Scan the holdout lists for tasks no longer holding out. */
1038 static void check_all_holdout_tasks(struct list_head *hop,
1039 bool needreport, bool *firstreport)
1041 struct task_struct *t, *t1;
1043 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1044 check_holdout_task(t, needreport, firstreport);
1049 /* Finish off the Tasks-RCU grace period. */
1050 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1053 * Because ->on_rq and ->nvcsw are not guaranteed to have full
1054 * memory barriers prior to them in the schedule() path, memory
1055 * reordering on other CPUs could cause their RCU-tasks read-side
1056 * critical sections to extend past the end of the grace period.
1057 * However, because these ->nvcsw updates are carried out with
1058 * interrupts disabled, we can use synchronize_rcu() to force the
1059 * needed ordering on all such CPUs.
1061 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1062 * accesses to be within the grace period, avoiding the need for
1063 * memory barriers for ->rcu_tasks_holdout accesses.
1065 * In addition, this synchronize_rcu() waits for exiting tasks
1066 * to complete their final preempt_disable() region of execution,
1067 * ensuring that the whole region, from before tasklist removal
1068 * until the final schedule() with TASK_DEAD state, acts as an
1069 * RCU Tasks read-side critical section.
1074 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1076 #ifndef CONFIG_TINY_RCU
1079 rtsi = READ_ONCE(rcu_task_stall_info);
1080 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1081 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1082 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1083 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1084 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1085 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1086 #endif // #ifndef CONFIG_TINY_RCU
1090 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1091 * @rhp: structure to be used for queueing the RCU updates.
1092 * @func: actual callback function to be invoked after the grace period
1094 * The callback function will be invoked some time after a full grace
1095 * period elapses, in other words after all currently executing RCU
1096 * read-side critical sections have completed. call_rcu_tasks() assumes
1097 * that the read-side critical sections end at a voluntary context
1098 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1099 * or transition to usermode execution. As such, there are no read-side
1100 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1101 * this primitive is intended to determine that all tasks have passed
1102 * through a safe state, not so much for data-structure synchronization.
1104 * See the description of call_rcu() for more detailed information on
1105 * memory ordering guarantees.
1107 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1109 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1111 EXPORT_SYMBOL_GPL(call_rcu_tasks);
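// Hedged usage sketch, not from the original file: a caller frees a
// structure only after every task has passed through a Tasks-RCU
// quiescent state by embedding an rcu_head and using call_rcu_tasks().
// The struct old_tramp type and the old_tramp_free()/retire_trampoline()
// helpers are hypothetical; kfree() and container_of() are assumed to be
// available via the usual headers.
struct old_tramp {
	void *text;
	struct rcu_head rh;
};

static void old_tramp_free(struct rcu_head *rhp)
{
	struct old_tramp *otp = container_of(rhp, struct old_tramp, rh);

	/* Every task has passed a quiescent state, so freeing is safe. */
	kfree(otp);
}

static void __maybe_unused retire_trampoline(struct old_tramp *otp)
{
	/* Queue asynchronous freeing after a Tasks-RCU grace period. */
	call_rcu_tasks(&otp->rh, old_tramp_free);
}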
1114 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1116 * Control will return to the caller some time after a full rcu-tasks
1117 * grace period has elapsed, in other words after all currently
1118 * executing rcu-tasks read-side critical sections have completed. These
1119 * read-side critical sections are delimited by calls to schedule(),
1120 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1121 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1123 * This is a very specialized primitive, intended only for a few uses in
1124 * tracing and other situations requiring manipulation of function
1125 * preambles and profiling hooks. The synchronize_rcu_tasks() function
1126 * is not (yet) intended for heavy use from multiple CPUs.
1128 * See the description of synchronize_rcu() for more detailed information
1129 * on memory ordering guarantees.
1131 void synchronize_rcu_tasks(void)
1133 synchronize_rcu_tasks_generic(&rcu_tasks);
1135 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
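// Hedged usage sketch, not from the original file: the canonical use of
// synchronize_rcu_tasks() is tearing down code, such as a tracing
// trampoline, that tasks might be executing without any explicit
// read-side marking. The remove_trampoline_example() name and my_tramp
// argument are hypothetical.
static void __maybe_unused remove_trampoline_example(void *my_tramp)
{
	/* 1. Unhook every caller so no new execution enters my_tramp. */
	/*    (The details are subsystem-specific and omitted here.)   */

	/*
	 * 2. Wait until every task has passed through a voluntary context
	 *    switch, usermode execution, or the idle loop, so no task can
	 *    still be executing instructions inside my_tramp.
	 */
	synchronize_rcu_tasks();

	/* 3. Only now is it safe to free or reuse the trampoline memory. */
	kfree(my_tramp);
}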
1138 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1140 * Although the current implementation is guaranteed to wait, it is not
1141 * obligated to do so if, for example, there are no pending callbacks.
1143 void rcu_barrier_tasks(void)
1145 rcu_barrier_tasks_generic(&rcu_tasks);
1147 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
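// Hedged usage sketch, not from the original file: code that has queued
// callbacks with call_rcu_tasks() must flush them before the callback
// functions themselves go away, for example on module unload. The
// example_module_exit() name is hypothetical.
static void __maybe_unused example_module_exit(void)
{
	/*
	 * Wait for all previously queued call_rcu_tasks() callbacks to be
	 * invoked so that none of them can run after the module's code and
	 * data have been freed. This does not start a new grace period on
	 * behalf of callbacks queued after this call.
	 */
	rcu_barrier_tasks();
}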
1149 static int rcu_tasks_lazy_ms = -1;
1150 module_param(rcu_tasks_lazy_ms, int, 0444);
1152 static int __init rcu_spawn_tasks_kthread(void)
1154 rcu_tasks.gp_sleep = HZ / 10;
1155 rcu_tasks.init_fract = HZ / 10;
1156 if (rcu_tasks_lazy_ms >= 0)
1157 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1158 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1159 rcu_tasks.pertask_func = rcu_tasks_pertask;
1160 rcu_tasks.postscan_func = rcu_tasks_postscan;
1161 rcu_tasks.holdouts_func = check_all_holdout_tasks;
1162 rcu_tasks.postgp_func = rcu_tasks_postgp;
1163 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1167 #if !defined(CONFIG_TINY_RCU)
1168 void show_rcu_tasks_classic_gp_kthread(void)
1170 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1172 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1173 #endif // !defined(CONFIG_TINY_RCU)
1175 struct task_struct *get_rcu_tasks_gp_kthread(void)
1177 return rcu_tasks.kthread_ptr;
1179 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1182 * Protect against tasklist scan blind spot while the task is exiting and
1183 * may be removed from the tasklist. Do this by adding the task to yet another list.
1186 * Note that the task will remove itself from this list, so there is no
1187 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
1188 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
1189 * the needed get_task_struct().
1191 void exit_tasks_rcu_start(void)
1193 unsigned long flags;
1194 struct rcu_tasks_percpu *rtpcp;
1195 struct task_struct *t = current;
1197 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
1199 rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
1200 t->rcu_tasks_exit_cpu = smp_processor_id();
1201 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1202 if (!rtpcp->rtp_exit_list.next)
1203 INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
1204 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
1205 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1210 * Remove the task from the "yet another list" because do_exit() is now
1211 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
1213 void exit_tasks_rcu_stop(void)
1215 unsigned long flags;
1216 struct rcu_tasks_percpu *rtpcp;
1217 struct task_struct *t = current;
1219 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
1220 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
1221 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1222 list_del_init(&t->rcu_tasks_exit_list);
1223 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1227 * Contribute to protect against tasklist scan blind spot while the
1228 * task is exiting and may be removed from the tasklist. See the
1229 * synchronize_rcu() call in rcu_tasks_postgp() for further details.
1231 void exit_tasks_rcu_finish(void)
1233 exit_tasks_rcu_stop();
1234 exit_tasks_rcu_finish_trace(current);
1237 #else /* #ifdef CONFIG_TASKS_RCU */
1238 void exit_tasks_rcu_start(void) { }
1239 void exit_tasks_rcu_stop(void) { }
1240 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1241 #endif /* #else #ifdef CONFIG_TASKS_RCU */
1243 #ifdef CONFIG_TASKS_RUDE_RCU
1245 ////////////////////////////////////////////////////////////////////////
1247 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1248 // passing an empty function to schedule_on_each_cpu(). This approach
1249 // provides an asynchronous call_rcu_tasks_rude() API and batching of
1250 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1251 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1252 // and induces otherwise unnecessary context switches on all online CPUs,
1253 // whether idle or not.
1255 // Callback handling is provided by the rcu_tasks_kthread() function.
1257 // Ordering is provided by the scheduler's context-switch code.
1259 // Empty function to allow workqueues to force a context switch.
1260 static void rcu_tasks_be_rude(struct work_struct *work)
1264 // Wait for one rude RCU-tasks grace period.
1265 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1267 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1268 schedule_on_each_cpu(rcu_tasks_be_rude);
1271 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1272 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1276 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1277 * @rhp: structure to be used for queueing the RCU updates.
1278 * @func: actual callback function to be invoked after the grace period
1280 * The callback function will be invoked some time after a full grace
1281 * period elapses, in other words after all currently executing RCU
1282 * read-side critical sections have completed. call_rcu_tasks_rude()
1283 * assumes that the read-side critical sections end at context switch,
1284 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1285 * usermode execution is schedulable). As such, there are no read-side
1286 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1287 * this primitive is intended to determine that all tasks have passed
1288 * through a safe state, not so much for data-structure synchronization.
1290 * See the description of call_rcu() for more detailed information on
1291 * memory ordering guarantees.
1293 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1295 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1297 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1300 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1302 * Control will return to the caller some time after a rude rcu-tasks
1303 * grace period has elapsed, in other words after all currently
1304 * executing rcu-tasks read-side critical sections have completed. These
1305 * read-side critical sections are delimited by calls to schedule(),
1306 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1307 * context), and (in theory, anyway) cond_resched().
1309 * This is a very specialized primitive, intended only for a few uses in
1310 * tracing and other situations requiring manipulation of function preambles
1311 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1312 * (yet) intended for heavy use from multiple CPUs.
1314 * See the description of synchronize_rcu() for more detailed information
1315 * on memory ordering guarantees.
1317 void synchronize_rcu_tasks_rude(void)
1319 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1321 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1324 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1326 * Although the current implementation is guaranteed to wait, it is not
1327 * obligated to do so if, for example, there are no pending callbacks.
1329 void rcu_barrier_tasks_rude(void)
1331 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1333 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1335 int rcu_tasks_rude_lazy_ms = -1;
1336 module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1338 static int __init rcu_spawn_tasks_rude_kthread(void)
1340 rcu_tasks_rude.gp_sleep = HZ / 10;
1341 if (rcu_tasks_rude_lazy_ms >= 0)
1342 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1343 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1347 #if !defined(CONFIG_TINY_RCU)
1348 void show_rcu_tasks_rude_gp_kthread(void)
1350 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1352 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1353 #endif // !defined(CONFIG_TINY_RCU)
1355 struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1357 return rcu_tasks_rude.kthread_ptr;
1359 EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1361 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1363 ////////////////////////////////////////////////////////////////////////
1365 // Tracing variant of Tasks RCU. This variant is designed to be used
1366 // to protect tracing hooks, including those of BPF. This variant therefore:
1369 // 1. Has explicit read-side markers to allow finite grace periods
1370 // in the face of in-kernel loops for PREEMPT=n builds.
1372 // 2. Protects code in the idle loop, exception entry/exit, and
1373 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1375 // 3. Avoids expensive read-side instructions, having overhead similar
1376 // to that of Preemptible RCU.
1378 // There are of course downsides. For example, the grace-period code
1379 // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1380 // in nohz_full userspace. If needed, these downsides can be at least
1381 // partially remedied.
1383 // Perhaps most important, this variant of RCU does not affect the vanilla
1384 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1385 // readers can operate from idle, offline, and exception entry/exit in no
1386 // way allows rcu_preempt and rcu_sched readers to also do so.
1388 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1389 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1390 // function sets these function pointers up so that rcu_tasks_wait_gp()
1391 // invokes these functions in this order:
1393 // rcu_tasks_trace_pregp_step():
1394 // Disables CPU hotplug, adds all currently executing tasks to the
1395 // holdout list, then checks the state of all tasks that blocked
1396 // or were preempted within their current RCU Tasks Trace read-side
1397 // critical section, adding them to the holdout list if appropriate.
1398 // Finally, this function re-enables CPU hotplug.
1399 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1400 // rcu_tasks_trace_postscan():
1401 // Invokes synchronize_rcu() to wait for late-stage exiting tasks
1402 // to finish exiting.
1403 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1404 // Scans the holdout list, attempting to identify a quiescent state
1405 // for each task on the list. If there is a quiescent state, the
1406 // corresponding task is removed from the holdout list. Once this
1407 // list is empty, the grace period has completed.
1408 // rcu_tasks_trace_postgp():
1409 // Provides the needed full memory barrier and does debug checks.
1411 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1413 // Pre-grace-period update-side code is ordered before the grace period
1414 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1415 // read-side code is ordered before the grace period by atomic operations
1416 // on .b.need_qs flag of each task involved in this process, or by scheduler
1417 // context-switch ordering (for locked-down non-running readers).
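// Hedged usage sketch, not from the original file: unlike the classic
// flavor, RCU Tasks Trace has explicit read-side markers,
// rcu_read_lock_trace() and rcu_read_unlock_trace() from
// <linux/rcupdate_trace.h>, and the updater can wait with
// synchronize_rcu_tasks_trace(). The shared_hook pointer and the
// example_trace_reader()/example_trace_updater() names are hypothetical,
// and the plain READ_ONCE()/WRITE_ONCE() accesses are a simplification.
static void (*shared_hook)(void);

static void __maybe_unused example_trace_reader(void)
{
	void (*hook)(void);

	rcu_read_lock_trace();
	hook = READ_ONCE(shared_hook);	/* Cannot be freed while held. */
	if (hook)
		hook();
	rcu_read_unlock_trace();
}

static void __maybe_unused example_trace_updater(void)
{
	WRITE_ONCE(shared_hook, NULL);
	/* Wait for all pre-existing readers, even in idle or exception paths. */
	synchronize_rcu_tasks_trace();
	/* The old hook's code and data may now be freed. */
}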
1419 // The lockdep state must be outside of #ifdef to be useful.
1420 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1421 static struct lock_class_key rcu_lock_trace_key;
1422 struct lockdep_map rcu_trace_lock_map =
1423 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1424 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1425 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1427 #ifdef CONFIG_TASKS_TRACE_RCU
1429 // Record outstanding IPIs to each CPU. No point in sending two...
1430 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1432 // The number of detections of task quiescent state relying on
1433 // heavyweight readers executing explicit memory barriers.
1434 static unsigned long n_heavy_reader_attempts;
1435 static unsigned long n_heavy_reader_updates;
1436 static unsigned long n_heavy_reader_ofl_updates;
1437 static unsigned long n_trc_holdouts;
1439 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1440 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1443 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1444 static u8 rcu_ld_need_qs(struct task_struct *t)
1446 smp_mb(); // Enforce full grace-period ordering.
1447 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1450 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1451 static void rcu_st_need_qs(struct task_struct *t, u8 v)
1453 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1454 smp_mb(); // Enforce full grace-period ordering.
1458 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1459 * the four-byte operand-size restriction of some platforms.
1460 * Returns the old value, which is often ignored.
1462 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1464 union rcu_special ret;
1465 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1466 union rcu_special trs_new = trs_old;
1468 if (trs_old.b.need_qs != old)
1469 return trs_old.b.need_qs;
1470 trs_new.b.need_qs = new;
1471 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1472 return ret.b.need_qs;
1474 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1477 * If we are the last reader, signal the grace-period kthread.
1478 * Also remove from the per-CPU list of blocked tasks.
1480 void rcu_read_unlock_trace_special(struct task_struct *t)
1482 unsigned long flags;
1483 struct rcu_tasks_percpu *rtpcp;
1484 union rcu_special trs;
1486 // Open-coded full-word version of rcu_ld_need_qs().
1487 smp_mb(); // Enforce full grace-period ordering.
1488 trs = smp_load_acquire(&t->trc_reader_special);
1490 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1491 smp_mb(); // Pairs with update-side barriers.
1492 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1493 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1494 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1495 TRC_NEED_QS_CHECKED);
1497 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1499 if (trs.b.blocked) {
1500 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1501 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1502 list_del_init(&t->trc_blkd_node);
1503 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1504 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1506 WRITE_ONCE(t->trc_reader_nesting, 0);
1508 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1510 /* Add a newly blocked reader task to its CPU's list. */
1511 void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1513 unsigned long flags;
1514 struct rcu_tasks_percpu *rtpcp;
1516 local_irq_save(flags);
1517 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1518 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1519 t->trc_blkd_cpu = smp_processor_id();
1520 if (!rtpcp->rtp_blkd_tasks.next)
1521 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1522 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1523 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1524 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1526 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1528 /* Add a task to the holdout list, if it is not already on the list. */
1529 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1531 if (list_empty(&t->trc_holdout_list)) {
1533 list_add(&t->trc_holdout_list, bhp);
1538 /* Remove a task from the holdout list, if it is in fact present. */
1539 static void trc_del_holdout(struct task_struct *t)
1541 if (!list_empty(&t->trc_holdout_list)) {
1542 list_del_init(&t->trc_holdout_list);
1548 /* IPI handler to check task state. */
1549 static void trc_read_check_handler(void *t_in)
1552 struct task_struct *t = current;
1553 struct task_struct *texp = t_in;
1555 // If the task is no longer running on this CPU, leave.
1556 if (unlikely(texp != t))
1557 goto reset_ipi; // Already on holdout list, so will check later.
1559 // If the task is not in a read-side critical section, and
1560 // if this is the last reader, awaken the grace-period kthread.
1561 nesting = READ_ONCE(t->trc_reader_nesting);
1562 if (likely(!nesting)) {
1563 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1566 // If we are racing with an rcu_read_unlock_trace(), try again later.
1567 if (unlikely(nesting < 0))
1570 // Get here if the task is in a read-side critical section.
1571 // Set its state so that it will update state for the grace-period
1572 // kthread upon exit from that critical section.
1573 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1576 // Allow future IPIs to be sent on CPU and for task.
1577 // Also order this IPI handler against any later manipulations of
1578 // the intended task.
1579 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1580 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1583 /* Callback function for scheduler to check locked-down task. */
1584 static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1586 struct list_head *bhp = bhp_in;
1587 int cpu = task_cpu(t);
1589 bool ofl = cpu_is_offline(cpu);
1591 if (task_curr(t) && !ofl) {
1592 // If no chance of heavyweight readers, do it the hard way.
1593 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1596 // If heavyweight readers are enabled on the remote task,
1598 // we can inspect its state even though it is currently running.
1598 // However, we cannot safely change its state.
1599 n_heavy_reader_attempts++;
1600 // Check for "running" idle tasks on offline CPUs.
1601 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1602 return -EINVAL; // No quiescent state, do it the hard way.
1603 n_heavy_reader_updates++;
1606 // The task is not running, so C-language access is safe.
1607 nesting = t->trc_reader_nesting;
1608 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1609 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1610 n_heavy_reader_ofl_updates++;
1613 // If not exiting a read-side critical section, mark as checked
1614 // so that the grace-period kthread will remove it from the holdout list.
1617 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1618 return 0; // In QS, so done.
1621 return -EINVAL; // Reader transitioning, try again later.
1623 // The task is in a read-side critical section, so set up its
1624 // state so that it will update state upon exit from that critical section.
1626 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1627 trc_add_holdout(t, bhp);
/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, bhp)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If the task is currently running, send an IPI; either way, add it to the list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Also, the grace-period
	// kthread is always in a quiescent state.  In addition, just return
	// if this task is already on the list.
	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
		return false;

	rcu_st_need_qs(t, 0);
	t->trc_ipi_to_cpu = -1;
	return true;
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{
	if (rcu_tasks_trace_pertask_prep(t, true))
		trc_wait_for_one_reader(t, hop);
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{
	LIST_HEAD(blkd_tasks);
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t;

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the CPU scan for the benefit of
	// any IPIs that might be needed.  This also waits for all readers
	// in CPU-hotplug code paths.
	cpus_read_lock();

	// These rcu_tasks_trace_pertask_prep() calls are serialized to
	// allow safe access to the hop list.
	for_each_online_cpu(cpu) {
		rcu_read_lock();
		t = cpu_curr_snapshot(cpu);
		if (rcu_tasks_trace_pertask_prep(t, true))
			trc_add_holdout(t, hop);
		rcu_read_unlock();
		cond_resched_tasks_rcu_qs();
	}

	// Only after all running tasks have been accounted for is it
	// safe to take care of the tasks that have blocked within their
	// current RCU tasks trace read-side critical section.
	for_each_possible_cpu(cpu) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
		while (!list_empty(&blkd_tasks)) {
			rcu_read_lock();
			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
			list_del_init(&t->trc_blkd_node);
			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
			rcu_tasks_trace_pertask(t, hop);
			rcu_read_unlock();
			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		}
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list is populated.
	cpus_read_unlock();
}

/*
 * Do intermediate processing between task and holdout scans.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().

	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
	synchronize_rcu();
	// Any tasks that exit after this point will set
	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t) && cpu_online(task_cpu(t)))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = rcu_ld_need_qs(t);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c%c\n",
			 t->pid,
			 ".I"[t->trc_ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
			 trc_rdr.nesting,
			 " !CN"[trc_rdr.needqs & 0x3],
			 " ?"[trc_rdr.needqs > 0x3],
			 cpu, cpu_online(cpu) ? "" : "(offline)");
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan for IPIs.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	union rcu_special trs = READ_ONCE(t->trc_reader_special);

	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
		rcu_read_unlock_trace_special(t);
	WRITE_ONCE(t->trc_reader_nesting, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

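/*
 * Illustrative usage sketch, not part of this file's implementation:
 * a hypothetical updater unlinks an element that sleepable readers
 * traverse under rcu_read_lock_trace(), then uses call_rcu_tasks_trace()
 * to defer freeing it.  The names "foo", "foo_free_cb", and "foo_remove"
 * are invented for this example.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_remove(struct foo *p)
 *	{
 *		list_del_rcu(&p->list);	// Hide from new readers.
 *		call_rcu_tasks_trace(&p->rh, foo_free_cb); // Free only after all
 *				// pre-existing rcu_read_lock_trace() readers finish.
 *	}
 */
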
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

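/*
 * Illustrative usage sketch, not part of this file's implementation:
 * a hypothetical subsystem unpublishes a handler that is invoked under
 * rcu_read_lock_trace(), waits for a trace rcu-tasks grace period, and
 * only then frees the handler's data.  The names "struct handler",
 * "active_handler", and "handler_unregister" are invented for this example.
 *
 *	static struct handler __rcu *active_handler;
 *
 *	static void handler_unregister(struct handler *h)
 *	{
 *		RCU_INIT_POINTER(active_handler, NULL);	// Unpublish.
 *		synchronize_rcu_tasks_trace();	// Wait out pre-existing
 *						// rcu_read_lock_trace() readers.
 *		kfree(h);			// Now safe to free.
 *	}
 */
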
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

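/*
 * Illustrative usage sketch, not part of this file's implementation:
 * module-exit code that has posted callbacks via call_rcu_tasks_trace()
 * would typically invoke rcu_barrier_tasks_trace() so that all of those
 * callbacks have run before the module's text and data disappear.  The
 * name "foo_exit" is invented for this example.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ... stop posting new call_rcu_tasks_trace() callbacks ...
 *		rcu_barrier_tasks_trace();	// Wait for pending callbacks.
 *		// Now safe to free callback-reachable data and unload.
 *	}
 */
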
int rcu_tasks_trace_lazy_ms = -1;
module_param(rcu_tasks_trace_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	if (rcu_tasks_trace_lazy_ms >= 0)
		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%lu h:%lu/%lu/%lu",
		data_race(n_trc_holdouts),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
{
	return rcu_tasks_trace.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
	unsigned long runstart;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = false;
}

static void rcu_tasks_initiate_self_tests(void)
{
#ifdef CONFIG_TASKS_RCU
	pr_info("Running RCU Tasks wait API self tests\n");
	tests[0].runstart = jiffies;
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("Running RCU Tasks Rude wait API self tests\n");
	tests[1].runstart = jiffies;
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("Running RCU Tasks Trace wait API self tests\n");
	tests[2].runstart = jiffies;
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

/*
 * Return:  0 - test passed
 *          1 - test failed, but have not timed out yet
 *         -1 - test failed and timed out
 */
static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;
	unsigned long bst = rcu_task_stall_timeout;

	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		while (tests[i].notrun) {		// still hanging.
			if (time_after(jiffies, tests[i].runstart + bst)) {
				pr_err("%s has failed boot-time tests.\n", tests[i].name);
				ret = -1;
				break;
			}
			ret = 1;
			break;
		}
	}
	WARN_ON(ret < 0);

	return ret;
}

/*
 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
 * test passes or has timed out.
 */
static struct delayed_work rcu_tasks_verify_work;
static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
{
	int ret = rcu_tasks_verify_self_tests();

	if (ret <= 0)
		return;

	/* Test fails but not timed out yet, reschedule another check */
	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
}

static int rcu_tasks_verify_schedule_work(void)
{
	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
	rcu_tasks_verify_work_fn(NULL);
	return 0;
}
late_initcall(rcu_tasks_verify_schedule_work);

#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init tasks_cblist_init_generic(void)
{
	lockdep_assert_irqs_disabled();
	WARN_ON(num_online_cpus() > 1);

#ifdef CONFIG_TASKS_RCU
	cblist_init_generic(&rcu_tasks);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	cblist_init_generic(&rcu_tasks_rude);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	cblist_init_generic(&rcu_tasks_trace);
#endif
}

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */