rcu-tasks: Use order_base_2() instead of ilog2()
[linux-2.6-block.git] / kernel / rcu / tasks.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	int cpu;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_gbl_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
	.rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \
}; \
static struct rcu_tasks rt_name = \
{ \
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
	.gp_func = gp, \
	.call_func = call, \
	.rtpcpu = &rt_name ## __percpu, \
	.name = n, \
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
	.percpu_enqueue_lim = 1, \
	.percpu_dequeue_lim = 1, \
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
	.kname = #rt_name, \
}
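
/*
 * For example, the classic flavor later in this file is instantiated as:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * This expands to a per-CPU array of rcu_tasks_percpu structures named
 * rcu_tasks__percpu and a struct rcu_tasks named rcu_tasks, whose
 * grace-period kthread will be named "rcu_tasks_kthread".
 */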

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	int lim;
	int shift;

	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;
	shift = ilog2(nr_cpu_ids / lim);
	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
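	/*
	 * For example, with nr_cpu_ids = 6 and lim = 4: ilog2(6 / 4) = 0,
	 * but (6 - 1) >> 0 = 5 >= 4, so shift becomes 1 and CPU IDs 0-5
	 * map onto queues 0-2, keeping every CPU within the first lim
	 * queues.
	 */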
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
	}
	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	wake_up(&rtp->cbs_wq);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	rtpcp = per_cpu_ptr(rtp->rtpcpu,
			    smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
		cblist_init_generic(rtp);
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
	}
	needwake = rcu_segcblist_empty(&rtpcp->cblist);
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
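	/*
	 * The count starts at 2 rather than 0 so that the completion
	 * cannot fire while callbacks are still being entrained below;
	 * the later atomic_sub_and_test(2, ...) removes this bias and
	 * completes immediately if no callback was entrained.
	 */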
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
			needgpcb |= 0x3;
		if (!rcu_segcblist_empty(&rtpcp->cblist))
			needgpcb |= 0x1;
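		// At this point, bit 0x1 of needgpcb says that at least
		// one callback is queued (so invocation will be needed)
		// and bit 0x2 says that not-yet-ready callbacks are
		// pending (so a new grace period is needed).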
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz &&
	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpu;
	int cpunext;
	unsigned long flags;
	int len;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	cpu = rtpcp->cpu;
	cpunext = cpu * 2 + 1;
	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		cpunext++;
		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		}
	}
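	// The two queue_work_on() calls above give each invocation two
	// children (CPUs 2 * cpu + 1 and 2 * cpu + 2), so CPU 0 fans out
	// to CPUs 1 and 2, CPU 1 to CPUs 3 and 4, and so on, reaching all
	// in-use queues in roughly log2(nr_cpu_ids) waves of workqueue
	// handlers rather than one long sequential scan.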

	if (rcu_segcblist_empty(&rtpcp->cblist))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int needgpcb;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* If there were none, wait a bit and start over. */
		wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));

		if (needgpcb & 0x2) {
			// Wait for one grace period.
			set_tasks_gp_state(rtp, RTGS_WAIT_GP);
			rtp->gp_start = jiffies;
			rcu_seq_start(&rtp->tasks_gp_seq);
			rtp->gp_func(rtp);
			rcu_seq_end(&rtp->tasks_gp_seq);
		}

		/* Invoke callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));

		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...

	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts. */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via the synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
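
/*
 * Usage sketch (my_data and my_data_free_cb are hypothetical names; the
 * pattern mirrors call_rcu() usage):
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *		// ... payload ...
 *	};
 *
 *	static void my_data_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	// After unlinking an instance from all tracing hooks:
 *	call_rcu_tasks(&old->rh, my_data_free_cb);
 *
 * The grace period ends only after every task has passed through a
 * voluntary context switch, idle, or usermode execution.
 */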

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	cblist_init_generic(&rcu_tasks);
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	cblist_init_generic(&rcu_tasks_rude);
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).

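/*
 * Reader-side usage sketch (the markers are declared in
 * include/linux/rcupdate_trace.h; my_hook and hook_ptr are hypothetical
 * names, shown here only for orientation):
 *
 *	rcu_read_lock_trace();
 *	my_hook = rcu_dereference(hook_ptr);	// hypothetical tracing hook
 *	if (my_hook)
 *		my_hook();
 *	rcu_read_unlock_trace();
 *
 * A Tasks Trace grace period ends only after every such critical section
 * that began before it has executed rcu_read_unlock_trace().
 */
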
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If the task is currently running, send an IPI; either way, add
	// it to the list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

1314/* Initialize for a new RCU-tasks-trace grace period. */
1315static void rcu_tasks_trace_pregp_step(void)
1316{
1317 int cpu;
1318
d5f177d3
PM
1319 // Allow for fast-acting IPIs.
1320 atomic_set(&trc_n_readers_need_end, 1);
1321
1322 // There shouldn't be any old IPIs, but...
1323 for_each_possible_cpu(cpu)
1324 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
81b4a7bc
PM
1325
1326 // Disable CPU hotplug across the tasklist scan.
1327 // This also waits for all readers in CPU-hotplug code paths.
1328 cpus_read_lock();
d5f177d3
PM
1329}
1330
1331/* Do first-round processing for the specified task. */
1332static void rcu_tasks_trace_pertask(struct task_struct *t,
1333 struct list_head *hop)
1334{
1b04fa99
URS
1335 // During early boot when there is only the one boot CPU, there
1336 // is no idle task for the other CPUs. Just return.
1337 if (unlikely(t == NULL))
1338 return;
1339
276c4104 1340 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
43766c3e 1341 WRITE_ONCE(t->trc_reader_checked, false);
d5f177d3
PM
1342 t->trc_ipi_to_cpu = -1;
1343 trc_wait_for_one_reader(t, hop);
1344}
1345
/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	// Flag characters: "I" means an IPI is outstanding to this task,
	// "i" means it is an idle task, "N" means its CPU is nohz_full,
	// and a trailing "N" means a quiescent state is still needed.
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c\n",
			 t->pid,
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 trc_rdr.nesting,
			 " N"[!!trc_rdr.needqs],
			 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic(); // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic(); // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break; // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

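/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * updater unpublishes a structure and uses call_rcu_tasks_trace() to
 * defer its freeing until all pre-existing rcu_read_lock_trace()
 * readers are done.  The struct foo, global_foo, foo_free_cb(), and
 * do_something_with() names are invented for illustration only.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *	static struct foo __rcu *global_foo;
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// Reader: an RCU Tasks Trace read-side critical section.
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(global_foo, rcu_read_lock_trace_held());
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock_trace();
 *
 *	// Updater: unpublish the structure, then defer the free past
 *	// all readers that might still hold a reference to it.
 *	p = rcu_replace_pointer(global_foo, NULL, true);
 *	if (p)
 *		call_rcu_tasks_trace(&p->rh, foo_free_cb);
 */
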
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

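/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * tracing-style updater detaches a hook and then waits out all
 * pre-existing rcu_read_lock_trace() readers before reclaiming it.
 * The my_hook, unregister_my_hook(), and free_my_hook() names are
 * invented for illustration only.
 *
 *	unregister_my_hook(my_hook);	// Make the hook unreachable.
 *	synchronize_rcu_tasks_trace();	// Wait for in-flight readers.
 *	free_my_hook(my_hook);		// Nothing can still be executing it.
 */
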
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

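/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * module-exit path that must not complete while call_rcu_tasks_trace()
 * callbacks referencing module code or data might still be pending.
 * The my_exit() and unpublish_my_structures() names are invented for
 * illustration only.
 *
 *	static void __exit my_exit(void)
 *	{
 *		unpublish_my_structures();	// No new callbacks after this.
 *		synchronize_rcu_tasks_trace();	// Wait for readers...
 *		rcu_barrier_tasks_trace();	// ...then flush queued callbacks.
 *	}
 */
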
static int __init rcu_spawn_tasks_trace_kthread(void)
{
	cblist_init_generic(&rcu_tasks_trace);
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	// Despite the field's name, setting ->notrun here records that the
	// callback did run; verification below treats a still-false
	// ->notrun as a failure.
	rttd->notrun = true;
}

static void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) { // Callback still hanging.
			pr_err("%s has failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */