/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct timer_list lazy_timer;
	unsigned int urgent_gp;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	struct list_head rtp_blkd_tasks;
	struct list_head rtp_exit_list;
	int cpu;
	int index;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @barrier_q_start: Most recent barrier start in jiffies.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	struct mutex tasks_gp_mutex;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	unsigned long lazy_jiffies;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	unsigned int wait_state;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	struct rcu_tasks_percpu **rtpcp_array;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	unsigned long barrier_q_start;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
}; \
static struct rcu_tasks rt_name = \
{ \
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
	.gp_func = gp, \
	.call_func = call, \
	.wait_state = TASK_UNINTERRUPTIBLE, \
	.rtpcpu = &rt_name ## __percpu, \
	.lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
	.name = n, \
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
	.percpu_enqueue_lim = 1, \
	.percpu_dequeue_lim = 1, \
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
	.kname = #rt_name, \
}
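
/*
 * For reference, the macro above is instantiated once per flavor later in
 * this file, for example (for the flavor built under CONFIG_TASKS_RCU):
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * which creates both the per-CPU rcu_tasks__percpu structures and the
 * global rcu_tasks structure wired up to that flavor's grace-period-wait
 * and call_rcu()-style functions.
 */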

#ifdef CONFIG_TASKS_RCU

/* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);

static int rcu_task_cpu_ids;

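/*
 * Summary of the callback-queue auto-adjustment knobs above, as used by
 * the code further down in this file: a negative rcu_task_enqueue_lim
 * (the default) enables rcu_task_cb_adjust, under which more than
 * rcu_task_contend_lim lock-contention events on one callback queue
 * within a single jiffy switch a flavor to per-CPU callback queuing,
 * and a total of no more than rcu_task_collapse_lim queued callbacks
 * lets it collapse back to CPU-0-only queuing.
 */
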
/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
#define RTGS_WAIT_WAIT_CBS 1
#define RTGS_WAIT_GP 2
#define RTGS_PRE_WAIT_GP 3
#define RTGS_SCAN_TASKLIST 4
#define RTGS_POST_SCAN_TASKLIST 5
#define RTGS_WAIT_SCAN_HOLDOUTS 6
#define RTGS_SCAN_HOLDOUTS 7
#define RTGS_POST_GP 8
#define RTGS_WAIT_READERS 9
#define RTGS_INVOKE_CBS 10
#define RTGS_WAIT_CBS 11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	int lim;
	int shift;
	int maxcpu;
	int index = 0;

	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
	BUG_ON(!rtp->rtpcp_array);

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		rtpcp->index = index;
		rtp->rtpcp_array[index] = rtpcp;
		index++;
		if (!rtpcp->rtp_blkd_tasks.next)
			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
		if (!rtpcp->rtp_exit_list.next)
			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
		rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
		maxcpu = cpu;
	}

	rcu_task_cpu_ids = maxcpu + 1;
	if (lim > rcu_task_cpu_ids)
		lim = rcu_task_cpu_ids;
	shift = ilog2(rcu_task_cpu_ids / lim);
	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
		shift++;
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);

	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
		rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
		rcu_task_cb_adjust, rcu_task_cpu_ids);
}

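/*
 * Illustrative arithmetic only, not part of the algorithm above: with
 * rcu_task_cpu_ids == 8 and lim == 3, ilog2(8 / 3) == 1, but
 * (8 - 1) >> 1 == 3 >= lim, so the shift is bumped to 2.  This ensures
 * that smp_processor_id() >> shift in call_rcu_tasks_generic() never
 * selects a queue index at or beyond the enqueue limit.
 */
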
// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{
	return jiffies + rtp->lazy_jiffies;
}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{
	unsigned long flags;
	bool needwake = false;
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = timer_container_of(rtpcp, tlp,
							    lazy_timer);

	rtp = rtpcp->rtpp;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
		if (!rtpcp->urgent_gp)
			rtpcp->urgent_gp = 1;
		needwake = true;
		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
	}
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (needwake)
		rcuwait_wake_up(&rtp->cbs_wait);
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	// Queuing callbacks before initialization not yet supported.
	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
		rcu_segcblist_init(&rtpcp->cblist);
	needwake = (func == wakeme_after_rcu) ||
		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
		if (rtp->lazy_jiffies)
			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
		else
			needwake = rcu_segcblist_empty(&rtpcp->cblist);
	}
	if (needwake)
		rtpcp->urgent_gp = 3;
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

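/*
 * Lazy-queuing behavior in the enqueue path above, for reference: a
 * newly queued callback normally just arms the per-queue lazy_timer
 * (the flavor's lazy_jiffies, DIV_ROUND_UP(HZ, 4) by default), letting
 * grace periods be batched.  The wakeup is made immediate when the
 * callback is wakeme_after_rcu (a synchronous waiter), when the queue
 * has accumulated rcu_task_lazy_lim callbacks, or, for flavors whose
 * lazy_jiffies is zero, as soon as a callback lands on an empty queue.
 */
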
// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = rhp; // Mark the callback as having been invoked.
	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rtp->barrier_q_start = jiffies;
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

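/*
 * Note on the barrier count above: it is initialized to 2 so that it
 * cannot reach zero until the entrain loop has finished, and the final
 * atomic_sub_and_test(2, ...) removes that bias, completing immediately
 * in the case where no callbacks were entrained on any queue.
 */
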
// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	int dequeue_limit;
	unsigned long flags;
	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
	for (cpu = 0; cpu < dequeue_limit; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
			if (rtp->lazy_jiffies)
				rtpcp->urgent_gp--;
			needgpcb |= 0x3;
		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
			rtpcp->urgent_gp = 0;
		}
		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			gpdone = false;
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		if (rtp->percpu_dequeue_lim == 1) {
			for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
				if (!cpu_possible(cpu))
					continue;
				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
			}
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

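/*
 * For reference, the value returned above is interpreted by
 * rcu_tasks_one_gp() as a bitmask: 0x1 means that at least one queue
 * has callbacks ready to invoke, and 0x2 means that a new grace period
 * is required for pending callbacks.
 */
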
// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpuwq;
	unsigned long flags;
	int len;
	int index;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	index = rtpcp->index * 2 + 1;
	if (index < num_possible_cpus()) {
		rtpcp_next = rtp->rtpcp_array[index];
		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
			index++;
			if (index < num_possible_cpus()) {
				rtpcp_next = rtp->rtpcp_array[index];
				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
				}
			}
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

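/*
 * The index arithmetic above treats rtpcp_array as a binary tree rooted
 * at entry 0 (which is where rcu_tasks_one_gp() starts invocation):
 * each queue kicks off workqueue handlers for entries 2*i+1 and 2*i+2,
 * so callback invocation fans out across the per-CPU queues in a number
 * of steps that grows only logarithmically with the number of CPUs.
 */
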
// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
	int needgpcb;

	mutex_lock(&rtp->tasks_gp_mutex);

	// If there were none, wait a bit and start over.
	if (unlikely(midboot)) {
		needgpcb = 0x2;
	} else {
		mutex_unlock(&rtp->tasks_gp_mutex);
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);
		mutex_lock(&rtp->tasks_gp_mutex);
	}

	if (needgpcb & 0x2) {
		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rcu_seq_start(&rtp->tasks_gp_seq);
		rtp->gp_func(rtp);
		rcu_seq_end(&rtp->tasks_gp_seq);
	}

	// Invoke callbacks.
	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
	mutex_unlock(&rtp->tasks_gp_mutex);
}

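/*
 * Note that the midboot path above (needgpcb forced to 0x2) is what lets
 * synchronize_rcu_tasks_generic() force a grace period before this
 * flavor's grace-period kthread has been spawned.
 */
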
// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int cpu;
	struct rcu_tasks *rtp = arg;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
		rtpcp->urgent_gp = 1;
	}

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		// Wait for one grace period and invoke any callbacks
		// that are ready.
		rcu_tasks_one_gp(rtp, false);

		// Paranoid sleep to keep this from entering a tight loop.
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
		      "synchronize_%s() called too soon", rtp->name))
		return;

	// If the grace-period kthread is running, use it.
	if (READ_ONCE(rtp->kthread_ptr)) {
		wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
		return;
	}
	rcu_tasks_one_gp(rtp, true);
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}


/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;
	bool haveurgent = false;
	bool haveurgentcbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
			havecbs = true;
		if (data_race(rtpcp->urgent_gp))
			haveurgent = true;
		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
			haveurgentcbs = true;
		if (havecbs && haveurgent && haveurgentcbs)
			break;
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		".u"[haveurgent],
		".U"[haveurgentcbs],
		rtp->lazy_jiffies,
		s);
}

/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt,
						   char *tf, char *tst)
{
	cpumask_var_t cm;
	int cpu;
	bool gotcb = false;
	unsigned long j = jiffies;

	pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n",
		 tt, tf, tst, data_race(rtp->tasks_gp_seq),
		 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
		 data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
	pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n",
		 data_race(rtp->percpu_enqueue_shift),
		 data_race(rtp->percpu_enqueue_lim),
		 data_race(rtp->percpu_dequeue_lim),
		 data_race(rtp->percpu_dequeue_gpseq));
	(void)zalloc_cpumask_var(&cm, GFP_KERNEL);
	pr_alert("\tCallback counts:");
	for_each_possible_cpu(cpu) {
		long n;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
			cpumask_set_cpu(cpu, cm);
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (!n)
			continue;
		pr_cont(" %d:%ld", cpu, n);
		gotcb = true;
	}
	if (gotcb)
		pr_cont(".\n");
	else
		pr_cont(" (none).\n");
	pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ",
		 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
		 atomic_read(&rtp->barrier_q_count));
	if (cpumask_available(cm) && !cpumask_empty(cm))
		pr_cont(" %*pbl.\n", cpumask_pr_args(cm));
	else
		pr_cont("(none).\n");
	free_cpumask_var(cm);
}

#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func(&holdouts);

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	if (rtp->pertask_func) {
		rcu_read_lock();
		for_each_process_thread(g, t)
			rtp->pertask_func(t, &holdouts);
		rcu_read_unlock();
	}

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on.  This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Check for quiescent states since the pregp's synchronize_rcu() */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{
	int cpu;

	/* Has the task been seen voluntarily sleeping? */
	if (!READ_ONCE(t->on_rq))
		return false;

	/*
	 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
	 * since it is a spurious state (it will transition into the
	 * traditional blocked state or get woken up without outside
	 * dependencies), not considering it such should only affect timing.
	 *
	 * Be conservative for now and not include it.
	 */

	/*
	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
	 * quiescent states.  But CPU boot code performed by the idle task
	 * isn't a quiescent state.
	 */
	if (is_idle_task(t))
		return false;

	cpu = task_cpu(t);

	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
		return false;

	return true;
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && rcu_tasks_is_holdout(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/* Processing between scanning tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	int cpu;
	int rtsi = READ_ONCE(rcu_task_stall_info);

	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
		add_timer(&tasks_rcu_exit_srcu_stall_timer);
	}

	/*
	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
	 * until their final schedule() with TASK_DEAD state. To cope with
	 * this, divide the fragile exit path part in two intersecting
	 * read side critical sections:
	 *
	 * 1) A task_struct list addition before calling exit_notify(),
	 *    which may remove the task from the tasklist, with the
	 *    removal after the final preempt_disable() call in do_exit().
	 *
	 * 2) An _RCU_ read side starting with the final preempt_disable()
	 *    call in do_exit() and ending with the final call to schedule()
	 *    with TASK_DEAD state.
	 *
	 * This handles the part 1). And postgp will handle part 2) with a
	 * call to synchronize_rcu().
	 */

	for_each_possible_cpu(cpu) {
		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
		struct task_struct *t1;
		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);

			// RT kernels need frequent pauses, otherwise
			// pause at least once per pair of jiffies.
			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
				continue;

			// Keep our place in the list while pausing.
			// Nothing else traverses this list, so adding a
			// bare list_head is OK.
			list_add(&tmp, &t->rcu_tasks_exit_list);
			raw_spin_unlock_irq_rcu_node(rtpcp);
			cond_resched(); // For CONFIG_PREEMPT=n kernels
			raw_spin_lock_irq_rcu_node(rtpcp);
			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
			list_del(&tmp);
			j = jiffies + 1;
		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}

	if (!IS_ENABLED(CONFIG_TINY_RCU))
		timer_delete_sync(&tasks_rcu_exit_srcu_stall_timer);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !rcu_tasks_is_holdout(t) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 data_race(t->rcu_tasks_idle_cpu), cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * enforcing the whole region before tasklist removal until
	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
	 * read side critical section.
	 */
	synchronize_rcu();
}

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
	int rtsi;

	rtsi = READ_ONCE(rcu_task_stall_info);
	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
	add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution. As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

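/*
 * Illustrative (hypothetical) usage sketch, not part of this file's API
 * surface: an updater that wants to free an old trampoline once no task
 * can still be executing in it might embed an rcu_head in its descriptor
 * (struct tramp_desc, unlink_tramp() and free_tramp() are made-up names)
 * and do:
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct tramp_desc *td = container_of(rhp, struct tramp_desc, rh);
 *
 *		free_tramp(td);		// hypothetical helper
 *	}
 *
 *	unlink_tramp(td);		// make the trampoline unreachable
 *	call_rcu_tasks(&td->rh, tramp_free_cb);
 *
 * Batching several removals under a single synchronize_rcu_tasks(), or
 * invoking rcu_barrier_tasks() before unloading the code containing the
 * callbacks, follows the same pattern as classic RCU.
 */
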
static int rcu_tasks_lazy_ms = -1;
module_param(rcu_tasks_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	if (rcu_tasks_lazy_ms >= 0)
		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_tasks.wait_state = TASK_IDLE;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);

void rcu_tasks_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_gp_kthread(void)
{
	return rcu_tasks.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);

void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data);

/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist.  Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
	preempt_disable();
	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
	t->rcu_tasks_exit_cpu = smp_processor_id();
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	WARN_ON_ONCE(!rtpcp->rtp_exit_list.next);
	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	preempt_enable();
}

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_finish(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	list_del_init(&t->rcu_tasks_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);

	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
// trick of passing an empty function to schedule_on_each_cpu().
// This approach provides batching of concurrent calls to the synchronous
// synchronize_rcu_tasks_rude() API.  This invokes schedule_on_each_cpu()
// in order to send IPIs far and wide and induces otherwise unnecessary
// context switches on all online CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/*
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1367 * @rhp: structure to be used for queueing the RCU updates.
1368 * @func: actual callback function to be invoked after the grace period
1369 *
1370 * The callback function will be invoked some time after a full grace
1371 * period elapses, in other words after all currently executing RCU
1372 * read-side critical sections have completed. call_rcu_tasks_rude()
1373 * assumes that the read-side critical sections end at context switch,
8af9e2c7 1374 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
a6517e9c
NU
1375 * usermode execution is schedulable). As such, there are no read-side
1376 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1377 * this primitive is intended to determine that all tasks have passed
1378 * through a safe state, not so much for data-structure synchronization.
c84aad76
PM
1379 *
1380 * See the description of call_rcu() for more detailed information on
1381 * memory ordering guarantees.
7945b741
PM
1382 *
1383 * This is no longer exported, and is instead reserved for use by
1384 * synchronize_rcu_tasks_rude().
c84aad76 1385 */
7945b741 1386static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
c84aad76
PM
1387{
1388 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1389}
c84aad76
PM
1390
1391/**
1392 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1393 *
1394 * Control will return to the caller some time after a rude rcu-tasks
1395 * grace period has elapsed, in other words after all currently
 1396 * executing rcu-tasks read-side critical sections have completed. These
1397 * read-side critical sections are delimited by calls to schedule(),
a6517e9c
NU
1398 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1399 * context), and (in theory, anyway) cond_resched().
c84aad76
PM
1400 *
1401 * This is a very specialized primitive, intended only for a few uses in
1402 * tracing and other situations requiring manipulation of function preambles
1403 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1404 * (yet) intended for heavy use from multiple CPUs.
1405 *
1406 * See the description of synchronize_rcu() for more detailed information
1407 * on memory ordering guarantees.
1408 */
1409void synchronize_rcu_tasks_rude(void)
1410{
481aa5fc
PM
1411 if (!IS_ENABLED(CONFIG_ARCH_WANTS_NO_INSTR) || IS_ENABLED(CONFIG_FORCE_TASKS_RUDE_RCU))
1412 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
c84aad76
PM
1413}
1414EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1415
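/*
 * Editorial example (not part of this file): a minimal, hedged sketch of how
 * an updater might use synchronize_rcu_tasks_rude().  It assumes <linux/slab.h>
 * for kfree() and that old_ops was previously published to tracing hooks by
 * some mechanism not shown here; struct example_trace_ops and
 * example_retire_trace_ops() are invented names used only for illustration.
 */
struct example_trace_ops {
	void (*func)(unsigned long ip);
};

static void __maybe_unused example_retire_trace_ops(struct example_trace_ops *old_ops)
{
	/* Unpublish old_ops from all call sites (mechanism not shown). */

	/* Wait for every task to pass through a context switch or usermode. */
	synchronize_rcu_tasks_rude();

	/* No CPU can still be executing old_ops->func(), so it may be freed. */
	kfree(old_ops);
}
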
c84aad76
PM
1416static int __init rcu_spawn_tasks_rude_kthread(void)
1417{
4fe192df 1418 rcu_tasks_rude.gp_sleep = HZ / 10;
c84aad76
PM
1419 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1420 return 0;
1421}
c84aad76 1422
27c0f144
PM
1423#if !defined(CONFIG_TINY_RCU)
1424void show_rcu_tasks_rude_gp_kthread(void)
e21408ce
PM
1425{
1426 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1427}
27c0f144 1428EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
fe91cf39
PM
1429
1430void rcu_tasks_rude_torture_stats_print(char *tt, char *tf)
1431{
1432 rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
1433}
1434EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print);
27c0f144 1435#endif // !defined(CONFIG_TINY_RCU)
a15ec57c
PM
1436
1437struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1438{
1439 return rcu_tasks_rude.kthread_ptr;
1440}
1441EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1442
dddcddef
Z
1443void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq)
1444{
1445 *flags = 0;
1446 *gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq);
1447}
1448EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data);
1449
27c0f144 1450#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
d5f177d3
PM
1451
1452////////////////////////////////////////////////////////////////////////
1453//
1454// Tracing variant of Tasks RCU. This variant is designed to be used
1455// to protect tracing hooks, including those of BPF. This variant
1456// therefore:
1457//
1458// 1. Has explicit read-side markers to allow finite grace periods
1459// in the face of in-kernel loops for PREEMPT=n builds.
1460//
1461// 2. Protects code in the idle loop, exception entry/exit, and
1462// CPU-hotplug code paths, similar to the capabilities of SRCU.
1463//
c4f113ac 1464// 3. Avoids expensive read-side instructions, having overhead similar
d5f177d3
PM
1465// to that of Preemptible RCU.
1466//
eea3423b
PM
1467// There are of course downsides. For example, the grace-period code
1468// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1469// in nohz_full userspace. If needed, these downsides can be at least
1470// partially remedied.
d5f177d3
PM
1471//
1472// Perhaps most important, this variant of RCU does not affect the vanilla
1473// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1474// readers can operate from idle, offline, and exception entry/exit in no
1475// way allows rcu_preempt and rcu_sched readers to also do so.
a434dd10
PM
1476//
1477// The implementation uses rcu_tasks_wait_gp(), which relies on function
1478// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1479// function sets these function pointers up so that rcu_tasks_wait_gp()
1480// invokes these functions in this order:
1481//
1482// rcu_tasks_trace_pregp_step():
eea3423b
PM
1483// Disables CPU hotplug, adds all currently executing tasks to the
1484// holdout list, then checks the state of all tasks that blocked
1485// or were preempted within their current RCU Tasks Trace read-side
1486// critical section, adding them to the holdout list if appropriate.
1487// Finally, this function re-enables CPU hotplug.
1488// The ->pertask_func() pointer is NULL, so there is no per-task processing.
a434dd10 1489// rcu_tasks_trace_postscan():
eea3423b
PM
1490// Invokes synchronize_rcu() to wait for late-stage exiting tasks
1491// to finish exiting.
a434dd10
PM
1492// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1493// Scans the holdout list, attempting to identify a quiescent state
1494// for each task on the list. If there is a quiescent state, the
eea3423b
PM
1495// corresponding task is removed from the holdout list. Once this
1496// list is empty, the grace period has completed.
a434dd10 1497// rcu_tasks_trace_postgp():
eea3423b 1498// Provides the needed full memory barrier and does debug checks.
a434dd10
PM
1499//
1500// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1501//
eea3423b
PM
1502// Pre-grace-period update-side code is ordered before the grace period
1503// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1504// read-side code is ordered before the grace period by atomic operations
1505// on .b.need_qs flag of each task involved in this process, or by scheduler
1506// context-switch ordering (for locked-down non-running readers).
d5f177d3
PM
1507
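/*
 * Editorial example (not part of this file): a hedged sketch of the explicit
 * read-side markers described above.  It assumes <linux/rcupdate_trace.h> for
 * rcu_read_lock_trace(), rcu_read_unlock_trace(), and rcu_read_lock_trace_held();
 * struct example_hook, example_hook_ptr, and example_run_hook() are invented
 * names used only for illustration.
 */
struct example_hook {
	void (*func)(void *arg);
};

static struct example_hook __rcu *example_hook_ptr;

static void __maybe_unused example_run_hook(void *arg)
{
	struct example_hook *hook;

	rcu_read_lock_trace();			// Begin RCU Tasks Trace reader.
	hook = rcu_dereference_check(example_hook_ptr, rcu_read_lock_trace_held());
	if (hook)
		hook->func(arg);		// Protected until rcu_read_unlock_trace().
	rcu_read_unlock_trace();		// End RCU Tasks Trace reader.
}
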
1508// The lockdep state must be outside of #ifdef to be useful.
1509#ifdef CONFIG_DEBUG_LOCK_ALLOC
1510static struct lock_class_key rcu_lock_trace_key;
1511struct lockdep_map rcu_trace_lock_map =
1512 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1513EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1514#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1515
1516#ifdef CONFIG_TASKS_TRACE_RCU
1517
d5f177d3
PM
1518// Record outstanding IPIs to each CPU. No point in sending two...
1519static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1520
40471509
PM
1521// The number of detections of task quiescent state relying on
1522// heavyweight readers executing explicit memory barriers.
6731da9e
PM
1523static unsigned long n_heavy_reader_attempts;
1524static unsigned long n_heavy_reader_updates;
1525static unsigned long n_heavy_reader_ofl_updates;
ffcc21a3 1526static unsigned long n_trc_holdouts;
40471509 1527
b0afa0f0
PM
1528void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1529DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1530 "RCU Tasks Trace");
1531
3847b645
PM
1532/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1533static u8 rcu_ld_need_qs(struct task_struct *t)
1534{
1535 smp_mb(); // Enforce full grace-period ordering.
1536 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1537}
1538
1539/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1540static void rcu_st_need_qs(struct task_struct *t, u8 v)
1541{
1542 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1543 smp_mb(); // Enforce full grace-period ordering.
1544}
1545
1546/*
1547 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1548 * the four-byte operand-size restriction of some platforms.
fc2897d2 1549 *
3847b645
PM
1550 * Returns the old value, which is often ignored.
1551 */
1552u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1553{
d4e287d7 1554 return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
3847b645
PM
1555}
1556EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1557
eea3423b
PM
1558/*
1559 * If we are the last reader, signal the grace-period kthread.
1560 * Also remove from the per-CPU list of blocked tasks.
1561 */
a5c071cc 1562void rcu_read_unlock_trace_special(struct task_struct *t)
d5f177d3 1563{
0bcb3868
PM
1564 unsigned long flags;
1565 struct rcu_tasks_percpu *rtpcp;
1566 union rcu_special trs;
1567
1568 // Open-coded full-word version of rcu_ld_need_qs().
1569 smp_mb(); // Enforce full grace-period ordering.
1570 trs = smp_load_acquire(&t->trc_reader_special);
276c4104 1571
3847b645 1572 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
276c4104
PM
1573 smp_mb(); // Pairs with update-side barriers.
1574 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
0bcb3868 1575 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
3847b645
PM
1576 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1577 TRC_NEED_QS_CHECKED);
1578
0bcb3868
PM
1579 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1580 }
1581 if (trs.b.blocked) {
1582 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1583 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1584 list_del_init(&t->trc_blkd_node);
1585 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1586 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
3847b645 1587 }
a5c071cc 1588 WRITE_ONCE(t->trc_reader_nesting, 0);
d5f177d3
PM
1589}
1590EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1591
0356d4e6
PM
1592/* Add a newly blocked reader task to its CPU's list. */
1593void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1594{
1595 unsigned long flags;
1596 struct rcu_tasks_percpu *rtpcp;
1597
1598 local_irq_save(flags);
1599 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1600 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1601 t->trc_blkd_cpu = smp_processor_id();
1602 if (!rtpcp->rtp_blkd_tasks.next)
1603 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1604 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
0bcb3868 1605 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
0356d4e6
PM
1606 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1607}
1608EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1609
d5f177d3
PM
1610/* Add a task to the holdout list, if it is not already on the list. */
1611static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1612{
1613 if (list_empty(&t->trc_holdout_list)) {
1614 get_task_struct(t);
1615 list_add(&t->trc_holdout_list, bhp);
ffcc21a3 1616 n_trc_holdouts++;
d5f177d3
PM
1617 }
1618}
1619
1620/* Remove a task from the holdout list, if it is in fact present. */
1621static void trc_del_holdout(struct task_struct *t)
1622{
1623 if (!list_empty(&t->trc_holdout_list)) {
1624 list_del_init(&t->trc_holdout_list);
1625 put_task_struct(t);
ffcc21a3 1626 n_trc_holdouts--;
d5f177d3
PM
1627 }
1628}
1629
1630/* IPI handler to check task state. */
1631static void trc_read_check_handler(void *t_in)
1632{
9ff86b4c 1633 int nesting;
d5f177d3
PM
1634 struct task_struct *t = current;
1635 struct task_struct *texp = t_in;
1636
1637 // If the task is no longer running on this CPU, leave.
3847b645 1638 if (unlikely(texp != t))
d5f177d3 1639 goto reset_ipi; // Already on holdout list, so will check later.
d5f177d3
PM
1640
1641 // If the task is not in a read-side critical section, and
1642 // if this is the last reader, awaken the grace-period kthread.
9ff86b4c
PM
1643 nesting = READ_ONCE(t->trc_reader_nesting);
1644 if (likely(!nesting)) {
3847b645 1645 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
d5f177d3
PM
1646 goto reset_ipi;
1647 }
ba3a86e4 1648 // If we are racing with an rcu_read_unlock_trace(), try again later.
9ff86b4c 1649 if (unlikely(nesting < 0))
ba3a86e4 1650 goto reset_ipi;
d5f177d3 1651
eea3423b
PM
1652 // Get here if the task is in a read-side critical section.
1653 // Set its state so that it will update state for the grace-period
1654 // kthread upon exit from that critical section.
55061126 1655 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
d5f177d3
PM
1656
1657reset_ipi:
1658 // Allow future IPIs to be sent on CPU and for task.
1659 // Also order this IPI handler against any later manipulations of
1660 // the intended task.
8211e922 1661 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
d5f177d3
PM
1662 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1663}
1664
1665/* Callback function for scheduler to check locked-down task. */
3847b645 1666static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
d5f177d3 1667{
3847b645 1668 struct list_head *bhp = bhp_in;
7d0c9c50 1669 int cpu = task_cpu(t);
18f08e75 1670 int nesting;
7e3b70e0 1671 bool ofl = cpu_is_offline(cpu);
7d0c9c50 1672
897ba84d 1673 if (task_curr(t) && !ofl) {
7d0c9c50 1674 // If no chance of heavyweight readers, do it the hard way.
897ba84d 1675 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
9b3c4ab3 1676 return -EINVAL;
7d0c9c50
PM
1677
1678 // If heavyweight readers are enabled on the remote task,
 1679 // we can inspect its state even though it is currently running.
1680 // However, we cannot safely change its state.
40471509 1681 n_heavy_reader_attempts++;
897ba84d 1682 // Check for "running" idle tasks on offline CPUs.
fc1096ab 1683 if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
9b3c4ab3 1684 return -EINVAL; // No quiescent state, do it the hard way.
40471509 1685 n_heavy_reader_updates++;
18f08e75 1686 nesting = 0;
7d0c9c50 1687 } else {
bdb0cca0 1688 // The task is not running, so C-language access is safe.
18f08e75 1689 nesting = t->trc_reader_nesting;
a80712b9 1690 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
897ba84d
PM
1691 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1692 n_heavy_reader_ofl_updates++;
7d0c9c50 1693 }
d5f177d3 1694
18f08e75
PM
1695 // If not exiting a read-side critical section, mark as checked
1696 // so that the grace-period kthread will remove it from the
1697 // holdout list.
0968e892
PM
1698 if (!nesting) {
1699 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1700 return 0; // In QS, so done.
3847b645 1701 }
0968e892 1702 if (nesting < 0)
eea3423b 1703 return -EINVAL; // Reader transitioning, try again later.
7d0c9c50
PM
1704
1705 // The task is in a read-side critical section, so set up its
0968e892
PM
1706 // state so that it will update state upon exit from that critical
1707 // section.
55061126 1708 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
3847b645 1709 trc_add_holdout(t, bhp);
9b3c4ab3 1710 return 0;
d5f177d3
PM
1711}
1712
1713/* Attempt to extract the state for the specified task. */
1714static void trc_wait_for_one_reader(struct task_struct *t,
1715 struct list_head *bhp)
1716{
1717 int cpu;
1718
1719 // If a previous IPI is still in flight, let it complete.
1720 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1721 return;
1722
1723 // The current task had better be in a quiescent state.
1724 if (t == current) {
3847b645 1725 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
bdb0cca0 1726 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
d5f177d3
PM
1727 return;
1728 }
1729
1730 // Attempt to nail down the task for inspection.
1731 get_task_struct(t);
3847b645 1732 if (!task_call_func(t, trc_inspect_reader, bhp)) {
d5f177d3
PM
1733 put_task_struct(t);
1734 return;
1735 }
1736 put_task_struct(t);
1737
45f4b4a2
PM
1738 // If this task is not yet on the holdout list, then we are in
1739 // an RCU read-side critical section. Otherwise, the invocation of
d0a85858 1740 // trc_add_holdout() that added it to the list did the necessary
45f4b4a2
PM
1741 // get_task_struct(). Either way, the task cannot be freed out
1742 // from under this code.
1743
d5f177d3
PM
 1744 // If currently running, send an IPI; either way, add to list.
1745 trc_add_holdout(t, bhp);
574de876
PM
1746 if (task_curr(t) &&
1747 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
d5f177d3
PM
1748 // The task is currently running, so try IPIing it.
1749 cpu = task_cpu(t);
1750
1751 // If there is already an IPI outstanding, let it happen.
1752 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1753 return;
1754
d5f177d3
PM
1755 per_cpu(trc_ipi_to_cpu, cpu) = true;
1756 t->trc_ipi_to_cpu = cpu;
238dbce3 1757 rcu_tasks_trace.n_ipis++;
96017bf9 1758 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
d5f177d3
PM
1759 // Just in case there is some other reason for
1760 // failure than the target CPU being offline.
46aa886c
NU
1761 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1762 __func__, cpu);
7e0669c3 1763 rcu_tasks_trace.n_ipis_fails++;
d5f177d3 1764 per_cpu(trc_ipi_to_cpu, cpu) = false;
46aa886c 1765 t->trc_ipi_to_cpu = -1;
d5f177d3
PM
1766 }
1767 }
1768}
1769
7460ade1
PM
1770/*
1771 * Initialize for first-round processing for the specified task.
1772 * Return false if task is NULL or already taken care of, true otherwise.
1773 */
1774static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
d5f177d3 1775{
1b04fa99 1776 // During early boot when there is only the one boot CPU, there
19415004
PM
1777 // is no idle task for the other CPUs. Also, the grace-period
1778 // kthread is always in a quiescent state. In addition, just return
1779 // if this task is already on the list.
7460ade1
PM
1780 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1781 return false;
1b04fa99 1782
3847b645 1783 rcu_st_need_qs(t, 0);
d5f177d3 1784 t->trc_ipi_to_cpu = -1;
7460ade1
PM
1785 return true;
1786}
1787
1788/* Do first-round processing for the specified task. */
1789static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1790{
1791 if (rcu_tasks_trace_pertask_prep(t, true))
1792 trc_wait_for_one_reader(t, hop);
1793}
1794
1fa98e2e 1795/* Initialize for a new RCU-tasks-trace grace period. */
7460ade1 1796static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1fa98e2e 1797{
dc7d54b4 1798 LIST_HEAD(blkd_tasks);
1fa98e2e 1799 int cpu;
dc7d54b4
PM
1800 unsigned long flags;
1801 struct rcu_tasks_percpu *rtpcp;
1802 struct task_struct *t;
1fa98e2e
PM
1803
1804 // There shouldn't be any old IPIs, but...
1805 for_each_possible_cpu(cpu)
1806 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1807
eea3423b
PM
1808 // Disable CPU hotplug across the CPU scan for the benefit of
1809 // any IPIs that might be needed. This also waits for all readers
1810 // in CPU-hotplug code paths.
1fa98e2e 1811 cpus_read_lock();
7460ade1 1812
eea3423b 1813 // These rcu_tasks_trace_pertask_prep() calls are serialized to
7460ade1 1814 // allow safe access to the hop list.
e386b672
PM
1815 for_each_online_cpu(cpu) {
1816 rcu_read_lock();
399ced95
FW
1817 // Note that cpu_curr_snapshot() picks up the target
1818 // CPU's current task while its runqueue is locked with
1819 // an smp_mb__after_spinlock(). This ensures that either
1820 // the grace-period kthread will see that task's read-side
1821 // critical section or the task will see the updater's pre-GP
1822 // accesses. The trailing smp_mb() in cpu_curr_snapshot()
1823 // does not currently play a role other than simplify
1824 // that function's ordering semantics. If these simplified
1825 // ordering semantics continue to be redundant, that smp_mb()
1826 // might be removed.
e386b672
PM
1827 t = cpu_curr_snapshot(cpu);
1828 if (rcu_tasks_trace_pertask_prep(t, true))
1829 trc_add_holdout(t, hop);
1830 rcu_read_unlock();
d6ad6063 1831 cond_resched_tasks_rcu_qs();
e386b672 1832 }
dc7d54b4
PM
1833
1834 // Only after all running tasks have been accounted for is it
1835 // safe to take care of the tasks that have blocked within their
1836 // current RCU tasks trace read-side critical section.
1837 for_each_possible_cpu(cpu) {
1838 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1839 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1840 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1841 while (!list_empty(&blkd_tasks)) {
1842 rcu_read_lock();
1843 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1844 list_del_init(&t->trc_blkd_node);
1845 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1846 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1847 rcu_tasks_trace_pertask(t, hop);
1848 rcu_read_unlock();
1849 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1850 }
1851 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
d6ad6063 1852 cond_resched_tasks_rcu_qs();
dc7d54b4 1853 }
56096ecd
PM
1854
1855 // Re-enable CPU hotplug now that the holdout list is populated.
1856 cpus_read_unlock();
1fa98e2e
PM
1857}
1858
9796e1ae 1859/*
955a0192 1860 * Do intermediate processing between task and holdout scans.
9796e1ae
PM
1861 */
1862static void rcu_tasks_trace_postscan(struct list_head *hop)
d5f177d3
PM
1863{
1864 // Wait for late-stage exiting tasks to finish exiting.
1865 // These might have passed the call to exit_tasks_rcu_finish().
e6c86c51
PM
1866
1867 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
d5f177d3 1868 synchronize_rcu();
3847b645
PM
1869 // Any tasks that exit after this point will set
1870 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
d5f177d3
PM
1871}
1872
65b629e7
NU
1873/* Communicate task state back to the RCU tasks trace stall warning request. */
1874struct trc_stall_chk_rdr {
1875 int nesting;
1876 int ipi_to_cpu;
1877 u8 needqs;
1878};
1879
1880static int trc_check_slow_task(struct task_struct *t, void *arg)
1881{
1882 struct trc_stall_chk_rdr *trc_rdrp = arg;
1883
f90f19da 1884 if (task_curr(t) && cpu_online(task_cpu(t)))
65b629e7
NU
1885 return false; // It is running, so decline to inspect it.
1886 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1887 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
3847b645 1888 trc_rdrp->needqs = rcu_ld_need_qs(t);
65b629e7
NU
1889 return true;
1890}
1891
4593e772
PM
1892/* Show the state of a task stalling the current RCU tasks trace GP. */
1893static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1894{
1895 int cpu;
65b629e7
NU
1896 struct trc_stall_chk_rdr trc_rdr;
1897 bool is_idle_tsk = is_idle_task(t);
4593e772
PM
1898
1899 if (*firstreport) {
1900 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1901 *firstreport = false;
1902 }
4593e772 1903 cpu = task_cpu(t);
65b629e7 1904 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
9f3eb5fb 1905 pr_alert("P%d: %c%c\n",
65b629e7 1906 t->pid,
9f3eb5fb 1907 ".I"[t->trc_ipi_to_cpu >= 0],
65b629e7
NU
1908 ".i"[is_idle_tsk]);
1909 else
387c0ad7 1910 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
65b629e7
NU
1911 t->pid,
1912 ".I"[trc_rdr.ipi_to_cpu >= 0],
1913 ".i"[is_idle_tsk],
1914 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
387c0ad7 1915 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
65b629e7 1916 trc_rdr.nesting,
be15a164
PM
1917 " !CN"[trc_rdr.needqs & 0x3],
1918 " ?"[trc_rdr.needqs > 0x3],
c8c03ad9 1919 cpu, cpu_online(cpu) ? "" : "(offline)");
4593e772
PM
1920 sched_show_task(t);
1921}
1922
1923/* List stalled IPIs for RCU tasks trace. */
1924static void show_stalled_ipi_trace(void)
1925{
1926 int cpu;
1927
1928 for_each_possible_cpu(cpu)
1929 if (per_cpu(trc_ipi_to_cpu, cpu))
1930 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1931}
1932
d5f177d3
PM
1933/* Do one scan of the holdout list. */
1934static void check_all_holdout_tasks_trace(struct list_head *hop,
4593e772 1935 bool needreport, bool *firstreport)
d5f177d3
PM
1936{
1937 struct task_struct *g, *t;
1938
eea3423b 1939 // Disable CPU hotplug across the holdout list scan for IPIs.
81b4a7bc
PM
1940 cpus_read_lock();
1941
d5f177d3
PM
1942 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1943 // If safe and needed, try to check the current task.
1944 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
3847b645 1945 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
d5f177d3
PM
1946 trc_wait_for_one_reader(t, hop);
1947
1948 // If check succeeded, remove this task from the list.
f5dbc594 1949 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
3847b645 1950 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
d5f177d3 1951 trc_del_holdout(t);
4593e772
PM
1952 else if (needreport)
1953 show_stalled_task_trace(t, firstreport);
d6ad6063 1954 cond_resched_tasks_rcu_qs();
4593e772 1955 }
81b4a7bc
PM
1956
1957 // Re-enable CPU hotplug now that the holdout list scan has completed.
1958 cpus_read_unlock();
1959
4593e772 1960 if (needreport) {
89401176 1961 if (*firstreport)
4593e772
PM
1962 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1963 show_stalled_ipi_trace();
d5f177d3
PM
1964 }
1965}
1966
cbe0d8d9
PM
1967static void rcu_tasks_trace_empty_fn(void *unused)
1968{
1969}
1970
d5f177d3 1971/* Wait for grace period to complete and provide ordering. */
af051ca4 1972static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
d5f177d3 1973{
cbe0d8d9 1974 int cpu;
4593e772 1975
cbe0d8d9
PM
1976 // Wait for any lingering IPI handlers to complete. Note that
1977 // if a CPU has gone offline or transitioned to userspace in the
1978 // meantime, all IPI handlers should have been drained beforehand.
1979 // Yes, this assumes that CPUs process IPIs in order. If that ever
1980 // changes, there will need to be a recheck and/or timed wait.
1981 for_each_online_cpu(cpu)
f5dbc594 1982 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
cbe0d8d9
PM
1983 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1984
d5f177d3 1985 smp_mb(); // Caller's code must be ordered after wakeup.
43766c3e 1986 // Pairs with pretty much every ordering primitive.
d5f177d3
PM
1987}
1988
1989/* Report any needed quiescent state for this exiting task. */
25246fc8 1990static void exit_tasks_rcu_finish_trace(struct task_struct *t)
d5f177d3 1991{
0356d4e6
PM
1992 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1993
3847b645 1994 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
bdb0cca0 1995 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
0bcb3868 1996 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
a5c071cc 1997 rcu_read_unlock_trace_special(t);
3847b645
PM
1998 else
1999 WRITE_ONCE(t->trc_reader_nesting, 0);
d5f177d3
PM
2000}
2001
d5f177d3
PM
2002/**
 2003 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
2004 * @rhp: structure to be used for queueing the RCU updates.
2005 * @func: actual callback function to be invoked after the grace period
2006 *
ed42c380
NU
2007 * The callback function will be invoked some time after a trace rcu-tasks
2008 * grace period elapses, in other words after all currently executing
2009 * trace rcu-tasks read-side critical sections have completed. These
2010 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
2011 * and rcu_read_unlock_trace().
d5f177d3
PM
2012 *
2013 * See the description of call_rcu() for more detailed information on
2014 * memory ordering guarantees.
2015 */
2016void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
2017{
2018 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
2019}
2020EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
2021
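/*
 * Editorial example (not part of this file): a hedged sketch of the usual
 * call_rcu_tasks_trace() pattern, with the rcu_head embedded in the object
 * being retired.  It assumes <linux/slab.h> for kfree(); struct example_prog
 * and its helpers are invented names used only for illustration.
 */
struct example_prog {
	struct rcu_head rh;
	void (*run)(void *ctx);
};

static void example_prog_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_prog, rh));
}

static void __maybe_unused example_prog_retire(struct example_prog *prog)
{
	/* Unpublish prog first (not shown), then let the callback free it */
	/* once all pre-existing rcu_read_lock_trace() readers have completed. */
	call_rcu_tasks_trace(&prog->rh, example_prog_free_cb);
}
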
2022/**
2023 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
2024 *
2025 * Control will return to the caller some time after a trace rcu-tasks
c7dcf810 2026 * grace period has elapsed, in other words after all currently executing
ed42c380 2027 * trace rcu-tasks read-side critical sections have completed. These read-side
c7dcf810
PM
2028 * critical sections are delimited by calls to rcu_read_lock_trace()
2029 * and rcu_read_unlock_trace().
d5f177d3
PM
2030 *
2031 * This is a very specialized primitive, intended only for a few uses in
2032 * tracing and other situations requiring manipulation of function preambles
2033 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
2034 * (yet) intended for heavy use from multiple CPUs.
2035 *
2036 * See the description of synchronize_rcu() for more detailed information
2037 * on memory ordering guarantees.
2038 */
2039void synchronize_rcu_tasks_trace(void)
2040{
2041 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
2042 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
2043}
2044EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
2045
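/*
 * Editorial example (not part of this file): a hedged sketch pairing the
 * reader-side markers from the example_hook sketch earlier in this file's
 * editorial examples with a synchronous update.  Only rcu_replace_pointer(),
 * synchronize_rcu_tasks_trace(), and kfree() are real kernel APIs; the
 * "example_" names are invented for illustration.
 */
static void __maybe_unused example_remove_hook(void)
{
	struct example_hook *old;

	/* Unpublish the hook so that no new reader can find it. */
	old = rcu_replace_pointer(example_hook_ptr, NULL, true);

	/* Wait for all pre-existing rcu_read_lock_trace() readers to finish. */
	synchronize_rcu_tasks_trace();

	kfree(old);	/* No reader can still reference the old hook. */
}
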
2046/**
2047 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
2048 *
2049 * Although the current implementation is guaranteed to wait, it is not
 2050 * obligated to do so, for example, when there are no pending callbacks.
2051 */
2052void rcu_barrier_tasks_trace(void)
2053{
ce9b1c66 2054 rcu_barrier_tasks_generic(&rcu_tasks_trace);
d5f177d3
PM
2055}
2056EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
2057
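/*
 * Editorial example (not part of this file): a hedged sketch of teardown
 * ordering with rcu_barrier_tasks_trace().  Only the barrier call itself is
 * real kernel API; example_shutdown() and the steps noted in comments are
 * invented for illustration.
 */
static void __maybe_unused example_shutdown(void)
{
	/* 1. Stop queueing new call_rcu_tasks_trace() callbacks (not shown). */

	/* 2. Wait for all previously queued callbacks to be invoked. */
	rcu_barrier_tasks_trace();

	/* 3. Only now is it safe to free shared state or unload the module. */
}
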
450d461a
PM
2058int rcu_tasks_trace_lazy_ms = -1;
2059module_param(rcu_tasks_trace_lazy_ms, int, 0444);
2060
d5f177d3
PM
2061static int __init rcu_spawn_tasks_trace_kthread(void)
2062{
2393a613 2063 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
4fe192df 2064 rcu_tasks_trace.gp_sleep = HZ / 10;
75dc2da5 2065 rcu_tasks_trace.init_fract = HZ / 10;
2393a613 2066 } else {
4fe192df
PM
2067 rcu_tasks_trace.gp_sleep = HZ / 200;
2068 if (rcu_tasks_trace.gp_sleep <= 0)
2069 rcu_tasks_trace.gp_sleep = 1;
75dc2da5 2070 rcu_tasks_trace.init_fract = HZ / 200;
2393a613
PM
2071 if (rcu_tasks_trace.init_fract <= 0)
2072 rcu_tasks_trace.init_fract = 1;
2073 }
450d461a
PM
2074 if (rcu_tasks_trace_lazy_ms >= 0)
2075 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
d5f177d3 2076 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
d5f177d3
PM
2077 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
2078 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
2079 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
2080 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
2081 return 0;
2082}
d5f177d3 2083
27c0f144
PM
2084#if !defined(CONFIG_TINY_RCU)
2085void show_rcu_tasks_trace_gp_kthread(void)
e21408ce 2086{
40471509 2087 char buf[64];
e21408ce 2088
cc5645fd 2089 snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
ffcc21a3 2090 data_race(n_trc_holdouts),
edf3775f 2091 data_race(n_heavy_reader_ofl_updates),
40471509
PM
2092 data_race(n_heavy_reader_updates),
2093 data_race(n_heavy_reader_attempts));
e21408ce
PM
2094 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2095}
27c0f144 2096EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
fe91cf39
PM
2097
2098void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
2099{
2100 rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, "");
2101}
2102EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
27c0f144 2103#endif // !defined(CONFIG_TINY_RCU)
e21408ce 2104
5f8e3202
PM
2105struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
2106{
2107 return rcu_tasks_trace.kthread_ptr;
2108}
2109EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
2110
dddcddef
Z
2111void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
2112{
2113 *flags = 0;
2114 *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq);
2115}
2116EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);
2117
d5f177d3 2118#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
25246fc8 2119static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
d5f177d3 2120#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
8fd8ca38 2121
8344496e 2122#ifndef CONFIG_TINY_RCU
e21408ce
PM
2123void show_rcu_tasks_gp_kthreads(void)
2124{
2125 show_rcu_tasks_classic_gp_kthread();
2126 show_rcu_tasks_rude_gp_kthread();
2127 show_rcu_tasks_trace_gp_kthread();
2128}
8344496e 2129#endif /* #ifndef CONFIG_TINY_RCU */
e21408ce 2130
bfba7ed0
URS
2131#ifdef CONFIG_PROVE_RCU
2132struct rcu_tasks_test_desc {
2133 struct rcu_head rh;
2134 const char *name;
2135 bool notrun;
1cf1144e 2136 unsigned long runstart;
bfba7ed0
URS
2137};
2138
2139static struct rcu_tasks_test_desc tests[] = {
2140 {
2141 .name = "call_rcu_tasks()",
2142 /* If not defined, the test is skipped. */
1cf1144e 2143 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
bfba7ed0 2144 },
bfba7ed0
URS
2145 {
2146 .name = "call_rcu_tasks_trace()",
2147 /* If not defined, the test is skipped. */
1cf1144e 2148 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
bfba7ed0
URS
2149 }
2150};
2151
7945b741 2152#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
bfba7ed0
URS
2153static void test_rcu_tasks_callback(struct rcu_head *rhp)
2154{
2155 struct rcu_tasks_test_desc *rttd =
2156 container_of(rhp, struct rcu_tasks_test_desc, rh);
2157
2158 pr_info("Callback from %s invoked.\n", rttd->name);
2159
1cf1144e 2160 rttd->notrun = false;
bfba7ed0 2161}
7945b741 2162#endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
bfba7ed0
URS
2163
2164static void rcu_tasks_initiate_self_tests(void)
2165{
bfba7ed0 2166#ifdef CONFIG_TASKS_RCU
92a708dc 2167 pr_info("Running RCU Tasks wait API self tests\n");
9420fb93 2168 tests[0].runstart = jiffies;
bfba7ed0
URS
2169 synchronize_rcu_tasks();
2170 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2171#endif
2172
2173#ifdef CONFIG_TASKS_RUDE_RCU
92a708dc 2174 pr_info("Running RCU Tasks Rude wait API self tests\n");
bfba7ed0 2175 synchronize_rcu_tasks_rude();
bfba7ed0
URS
2176#endif
2177
2178#ifdef CONFIG_TASKS_TRACE_RCU
92a708dc 2179 pr_info("Running RCU Tasks Trace wait API self tests\n");
7945b741 2180 tests[1].runstart = jiffies;
bfba7ed0 2181 synchronize_rcu_tasks_trace();
7945b741 2182 call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback);
bfba7ed0
URS
2183#endif
2184}
2185
e72ee5e1
WL
2186/*
2187 * Return: 0 - test passed
 2188 * 1 - test failed, but has not timed out yet
2189 * -1 - test failed and timed out
2190 */
bfba7ed0
URS
2191static int rcu_tasks_verify_self_tests(void)
2192{
2193 int ret = 0;
2194 int i;
1cf1144e 2195 unsigned long bst = rcu_task_stall_timeout;
bfba7ed0 2196
1cf1144e
PM
2197 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2198 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
bfba7ed0 2199 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1cf1144e
PM
2200 while (tests[i].notrun) { // still hanging.
2201 if (time_after(jiffies, tests[i].runstart + bst)) {
2202 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2203 ret = -1;
2204 break;
2205 }
e72ee5e1
WL
2206 ret = 1;
2207 break;
bfba7ed0
URS
2208 }
2209 }
e72ee5e1 2210 WARN_ON(ret < 0);
bfba7ed0
URS
2211
2212 return ret;
2213}
e72ee5e1
WL
2214
2215/*
2216 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2217 * test passes or has timed out.
2218 */
2219static struct delayed_work rcu_tasks_verify_work;
2220static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2221{
2222 int ret = rcu_tasks_verify_self_tests();
2223
2224 if (ret <= 0)
2225 return;
2226
2227 /* Test fails but not timed out yet, reschedule another check */
2228 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2229}
2230
2231static int rcu_tasks_verify_schedule_work(void)
2232{
2233 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2234 rcu_tasks_verify_work_fn(NULL);
2235 return 0;
2236}
2237late_initcall(rcu_tasks_verify_schedule_work);
bfba7ed0
URS
2238#else /* #ifdef CONFIG_PROVE_RCU */
2239static void rcu_tasks_initiate_self_tests(void) { }
2240#endif /* #else #ifdef CONFIG_PROVE_RCU */
2241
30ef0963
PM
2242void __init tasks_cblist_init_generic(void)
2243{
2244 lockdep_assert_irqs_disabled();
2245 WARN_ON(num_online_cpus() > 1);
2246
2247#ifdef CONFIG_TASKS_RCU
2248 cblist_init_generic(&rcu_tasks);
2249#endif
2250
2251#ifdef CONFIG_TASKS_RUDE_RCU
2252 cblist_init_generic(&rcu_tasks_rude);
2253#endif
2254
2255#ifdef CONFIG_TASKS_TRACE_RCU
2256 cblist_init_generic(&rcu_tasks_trace);
2257#endif
2258}
2259
23c22d91 2260static int __init rcu_init_tasks_generic(void)
1b04fa99
URS
2261{
2262#ifdef CONFIG_TASKS_RCU
2263 rcu_spawn_tasks_kthread();
2264#endif
2265
2266#ifdef CONFIG_TASKS_RUDE_RCU
2267 rcu_spawn_tasks_rude_kthread();
2268#endif
2269
2270#ifdef CONFIG_TASKS_TRACE_RCU
2271 rcu_spawn_tasks_trace_kthread();
2272#endif
bfba7ed0
URS
2273
2274 // Run the self-tests.
2275 rcu_tasks_initiate_self_tests();
23c22d91
PM
2276
2277 return 0;
1b04fa99 2278}
23c22d91 2279core_initcall(rcu_init_tasks_generic);
1b04fa99 2280
8fd8ca38
PM
2281#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2282static inline void rcu_tasks_bootup_oddness(void) {}
2283#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */