Commit | Line | Data |
---|---|---|
eacd6f04 PM |
1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* | |
3 | * Task-based RCU implementations. | |
4 | * | |
5 | * Copyright (C) 2020 Paul E. McKenney | |
6 | */ | |
7 | ||
8fd8ca38 | 8 | #ifdef CONFIG_TASKS_RCU_GENERIC |
9b073de1 | 9 | #include "rcu_segcblist.h" |
5873b8a9 PM |
10 | |
11 | //////////////////////////////////////////////////////////////////////// | |
12 | // | |
13 | // Generic data structures. | |
14 | ||
15 | struct rcu_tasks; | |
16 | typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); | |
e4fe5dd6 PM |
17 | typedef void (*pregp_func_t)(void); |
18 | typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); | |
9796e1ae | 19 | typedef void (*postscan_func_t)(struct list_head *hop); |
e4fe5dd6 | 20 | typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); |
af051ca4 | 21 | typedef void (*postgp_func_t)(struct rcu_tasks *rtp); |
eacd6f04 | 22 | |
07e10515 | 23 | /** |
cafafd67 | 24 | * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. |
9b073de1 | 25 | * @cblist: Callback list. |
381a4f3b | 26 | * @lock: Lock protecting per-CPU callback list. |
cafafd67 PM |
27 | */ |
28 | struct rcu_tasks_percpu { | |
9b073de1 | 29 | struct rcu_segcblist cblist; |
381a4f3b | 30 | raw_spinlock_t __private lock; |
cafafd67 PM |
31 | }; |
32 | ||
33 | /** | |
34 | * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. | |
a616aec9 | 35 | * @cbs_wq: Wait queue allowing a new callback to get the kthread's attention. |
cafafd67 | 36 | * @cbs_gbl_lock: Lock protecting callback list. |
07e10515 | 37 | * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. |
5873b8a9 | 38 | * @gp_func: This flavor's grace-period-wait function. |
af051ca4 | 39 | * @gp_state: Grace period's most recent state transition (debugging). |
4fe192df | 40 | * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. |
2393a613 | 41 | * @init_fract: Initial backoff sleep interval. |
af051ca4 PM |
42 | * @gp_jiffies: Time of last @gp_state transition. |
43 | * @gp_start: Most recent grace-period start in jiffies. | |
b14fb4fb | 44 | * @tasks_gp_seq: Number of grace periods completed since boot. |
238dbce3 | 45 | * @n_ipis: Number of IPIs sent to encourage grace periods to end. |
7e0669c3 | 46 | * @n_ipis_fails: Number of IPI-send failures. |
e4fe5dd6 PM |
47 | * @pregp_func: This flavor's pre-grace-period function (optional). |
48 | * @pertask_func: This flavor's per-task scan function (optional). | |
49 | * @postscan_func: This flavor's post-task scan function (optional). | |
85b86994 | 50 | * @holdouts_func: This flavor's holdout-list scan function (optional). |
e4fe5dd6 | 51 | * @postgp_func: This flavor's post-grace-period function (optional). |
5873b8a9 | 52 | * @call_func: This flavor's call_rcu()-equivalent function. |
cafafd67 | 53 | * @rtpcpu: This flavor's rcu_tasks_percpu structure. |
7a30871b | 54 | * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. |
c97d12a6 PM |
55 | * @name: This flavor's textual name. |
56 | * @kname: This flavor's kthread name. | |
07e10515 PM |
57 | */ |
58 | struct rcu_tasks { | |
07e10515 | 59 | struct wait_queue_head cbs_wq; |
cafafd67 | 60 | raw_spinlock_t cbs_gbl_lock; |
af051ca4 | 61 | int gp_state; |
4fe192df | 62 | int gp_sleep; |
2393a613 | 63 | int init_fract; |
af051ca4 | 64 | unsigned long gp_jiffies; |
88092d0c | 65 | unsigned long gp_start; |
b14fb4fb | 66 | unsigned long tasks_gp_seq; |
238dbce3 | 67 | unsigned long n_ipis; |
7e0669c3 | 68 | unsigned long n_ipis_fails; |
07e10515 | 69 | struct task_struct *kthread_ptr; |
5873b8a9 | 70 | rcu_tasks_gp_func_t gp_func; |
e4fe5dd6 PM |
71 | pregp_func_t pregp_func; |
72 | pertask_func_t pertask_func; | |
73 | postscan_func_t postscan_func; | |
74 | holdouts_func_t holdouts_func; | |
75 | postgp_func_t postgp_func; | |
5873b8a9 | 76 | call_rcu_func_t call_func; |
cafafd67 | 77 | struct rcu_tasks_percpu __percpu *rtpcpu; |
7a30871b | 78 | int percpu_enqueue_shift; |
c97d12a6 PM |
79 | char *name; |
80 | char *kname; | |
07e10515 PM |
81 | }; |
82 | ||
cafafd67 PM |
83 | #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ |
84 | static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ | |
381a4f3b | 85 | .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ |
cafafd67 PM |
86 | }; \ |
87 | static struct rcu_tasks rt_name = \ | |
88 | { \ | |
89 | .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \ | |
90 | .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ | |
91 | .gp_func = gp, \ | |
92 | .call_func = call, \ | |
93 | .rtpcpu = &rt_name ## __percpu, \ | |
94 | .name = n, \ | |
7a30871b | 95 | .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \ |
cafafd67 | 96 | .kname = #rt_name, \ |
07e10515 PM |
97 | } |
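// For example, a hypothetical flavor "rcu_tasks_example" with grace-period
// function rcu_tasks_example_wait_gp() and queuing function
// call_rcu_tasks_example() (all three names made up for illustration)
// would be instantiated as:
//
//	DEFINE_RCU_TASKS(rcu_tasks_example, rcu_tasks_example_wait_gp,
//			 call_rcu_tasks_example, "RCU Tasks Example");
//
// The rcu_tasks, rcu_tasks_rude, and rcu_tasks_trace flavors below are
// instantiated in exactly this way.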
98 | ||
eacd6f04 PM |
99 | /* Track exiting tasks in order to allow them to be waited for. */ |
100 | DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); | |
101 | ||
b0afa0f0 | 102 | /* Avoid IPIing CPUs early in the grace period. */ |
574de876 | 103 | #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) |
b0afa0f0 PM |
104 | static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; |
105 | module_param(rcu_task_ipi_delay, int, 0644); | |
106 | ||
eacd6f04 PM |
107 | /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ |
108 | #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) | |
109 | static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; | |
110 | module_param(rcu_task_stall_timeout, int, 0644); | |
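// Because this header is included from kernel/rcu/update.c, these module
// parameters take the "rcupdate." prefix. For example, assuming HZ=1000,
// a ten-minute stall timeout can be requested on the kernel command line:
//
//	rcupdate.rcu_task_stall_timeout=600000
//
// The value is in jiffies, and values <= 0 disable stall warnings.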
111 | ||
af051ca4 PM |
112 | /* RCU tasks grace-period state for debugging. */ |
113 | #define RTGS_INIT 0 | |
114 | #define RTGS_WAIT_WAIT_CBS 1 | |
115 | #define RTGS_WAIT_GP 2 | |
116 | #define RTGS_PRE_WAIT_GP 3 | |
117 | #define RTGS_SCAN_TASKLIST 4 | |
118 | #define RTGS_POST_SCAN_TASKLIST 5 | |
119 | #define RTGS_WAIT_SCAN_HOLDOUTS 6 | |
120 | #define RTGS_SCAN_HOLDOUTS 7 | |
121 | #define RTGS_POST_GP 8 | |
122 | #define RTGS_WAIT_READERS 9 | |
123 | #define RTGS_INVOKE_CBS 10 | |
124 | #define RTGS_WAIT_CBS 11 | |
8344496e | 125 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
126 | static const char * const rcu_tasks_gp_state_names[] = { |
127 | "RTGS_INIT", | |
128 | "RTGS_WAIT_WAIT_CBS", | |
129 | "RTGS_WAIT_GP", | |
130 | "RTGS_PRE_WAIT_GP", | |
131 | "RTGS_SCAN_TASKLIST", | |
132 | "RTGS_POST_SCAN_TASKLIST", | |
133 | "RTGS_WAIT_SCAN_HOLDOUTS", | |
134 | "RTGS_SCAN_HOLDOUTS", | |
135 | "RTGS_POST_GP", | |
136 | "RTGS_WAIT_READERS", | |
137 | "RTGS_INVOKE_CBS", | |
138 | "RTGS_WAIT_CBS", | |
139 | }; | |
8344496e | 140 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 141 | |
5873b8a9 PM |
142 | //////////////////////////////////////////////////////////////////////// |
143 | // | |
144 | // Generic code. | |
145 | ||
af051ca4 PM |
146 | /* Record grace-period phase and time. */ |
147 | static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) | |
148 | { | |
149 | rtp->gp_state = newstate; | |
150 | rtp->gp_jiffies = jiffies; | |
151 | } | |
152 | ||
8344496e | 153 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
154 | /* Return state name. */ |
155 | static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) | |
156 | { | |
157 | int i = data_race(rtp->gp_state); // Let KCSAN detect update races | |
158 | int j = READ_ONCE(i); // Prevent the compiler from reading twice | |
159 | ||
160 | if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) | |
161 | return "???"; | |
162 | return rcu_tasks_gp_state_names[j]; | |
163 | } | |
8344496e | 164 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 165 | |
cafafd67 PM |
166 | // Initialize per-CPU callback lists for the specified flavor of |
167 | // Tasks RCU. | |
168 | static void cblist_init_generic(struct rcu_tasks *rtp) | |
169 | { | |
170 | int cpu; | |
171 | unsigned long flags; | |
172 | ||
173 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
7a30871b | 174 | rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids); |
cafafd67 PM |
175 | for_each_possible_cpu(cpu) { |
176 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
177 | ||
178 | WARN_ON_ONCE(!rtpcp); | |
179 | if (cpu) | |
381a4f3b PM |
180 | raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); |
181 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. | |
9b073de1 PM |
182 | if (rcu_segcblist_empty(&rtpcp->cblist)) |
183 | rcu_segcblist_init(&rtpcp->cblist); | |
381a4f3b | 184 | raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. |
cafafd67 PM |
185 | } |
186 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
187 | ||
188 | } | |
189 | ||
5873b8a9 PM |
190 | // Enqueue a callback for the specified flavor of Tasks RCU. |
191 | static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, | |
192 | struct rcu_tasks *rtp) | |
eacd6f04 PM |
193 | { |
194 | unsigned long flags; | |
195 | bool needwake; | |
cafafd67 | 196 | struct rcu_tasks_percpu *rtpcp; |
eacd6f04 PM |
197 | |
198 | rhp->next = NULL; | |
199 | rhp->func = func; | |
cafafd67 | 200 | local_irq_save(flags); |
7a30871b PM |
201 | rtpcp = per_cpu_ptr(rtp->rtpcpu, |
202 | smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift)); | |
381a4f3b | 203 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. |
9b073de1 | 204 | if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { |
381a4f3b | 205 | raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. |
cafafd67 | 206 | cblist_init_generic(rtp); |
381a4f3b | 207 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. |
cafafd67 | 208 | } |
9b073de1 PM |
209 | needwake = rcu_segcblist_empty(&rtpcp->cblist); |
210 | rcu_segcblist_enqueue(&rtpcp->cblist, rhp); | |
381a4f3b | 211 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
eacd6f04 | 212 | /* We can't create the thread unless interrupts are enabled. */ |
07e10515 PM |
213 | if (needwake && READ_ONCE(rtp->kthread_ptr)) |
214 | wake_up(&rtp->cbs_wq); | |
eacd6f04 | 215 | } |
eacd6f04 | 216 | |
5873b8a9 PM |
217 | // Wait for a grace period for the specified flavor of Tasks RCU. |
218 | static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) | |
eacd6f04 PM |
219 | { |
220 | /* Complain if the scheduler has not started. */ | |
221 | RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, | |
222 | "synchronize_rcu_tasks called too soon"); | |
223 | ||
224 | /* Wait for the grace period. */ | |
5873b8a9 | 225 | wait_rcu_gp(rtp->call_func); |
eacd6f04 PM |
226 | } |
227 | ||
228 | /* RCU-tasks kthread that detects grace periods and invokes callbacks. */ | |
229 | static int __noreturn rcu_tasks_kthread(void *arg) | |
230 | { | |
231 | unsigned long flags; | |
9b073de1 PM |
232 | int len; |
233 | struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); | |
234 | struct rcu_head *rhp; | |
07e10515 | 235 | struct rcu_tasks *rtp = arg; |
eacd6f04 PM |
236 | |
237 | /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ | |
238 | housekeeping_affine(current, HK_FLAG_RCU); | |
07e10515 | 239 | WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! |
eacd6f04 PM |
240 | |
241 | /* | |
242 | * Each pass through the following loop makes one check for | |
243 | * newly arrived callbacks, and, if there are some, waits for | |
244 | * one RCU-tasks grace period and then invokes the callbacks. | |
245 | * This loop is terminated by the system going down. ;-) | |
246 | */ | |
247 | for (;;) { | |
cafafd67 PM |
248 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each... |
249 | ||
0db7c32a | 250 | set_tasks_gp_state(rtp, RTGS_WAIT_CBS); |
eacd6f04 PM |
251 | |
252 | /* Pick up any new callbacks. */ | |
381a4f3b | 253 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
9b073de1 PM |
254 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
255 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
381a4f3b | 256 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
eacd6f04 PM |
257 | |
258 | /* If there were none, wait a bit and start over. */ | |
9b073de1 | 259 | if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { |
07e10515 | 260 | wait_event_interruptible(rtp->cbs_wq, |
9b073de1 PM |
261 | rcu_segcblist_pend_cbs(&rtpcp->cblist)); |
262 | if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) { | |
eacd6f04 | 263 | WARN_ON(signal_pending(current)); |
af051ca4 | 264 | set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS); |
ea6eed9f | 265 | schedule_timeout_idle(HZ/10); |
eacd6f04 PM |
266 | } |
267 | continue; | |
268 | } | |
269 | ||
5873b8a9 | 270 | // Wait for one grace period. |
af051ca4 | 271 | set_tasks_gp_state(rtp, RTGS_WAIT_GP); |
88092d0c | 272 | rtp->gp_start = jiffies; |
b14fb4fb | 273 | rcu_seq_start(&rtp->tasks_gp_seq); |
5873b8a9 | 274 | rtp->gp_func(rtp); |
b14fb4fb | 275 | rcu_seq_end(&rtp->tasks_gp_seq); |
eacd6f04 PM |
276 | |
277 | /* Invoke the callbacks. */ | |
af051ca4 | 278 | set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); |
381a4f3b | 279 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
9b073de1 PM |
280 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
281 | rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); | |
381a4f3b | 282 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
9b073de1 PM |
283 | len = rcl.len; |
284 | for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { | |
eacd6f04 | 285 | local_bh_disable(); |
9b073de1 | 286 | rhp->func(rhp); |
eacd6f04 | 287 | local_bh_enable(); |
eacd6f04 PM |
288 | cond_resched(); |
289 | } | |
381a4f3b | 290 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
9b073de1 PM |
291 | rcu_segcblist_add_len(&rtpcp->cblist, -len); |
292 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
381a4f3b | 293 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
eacd6f04 | 294 | /* Paranoid sleep to keep this from entering a tight loop */ |
4fe192df | 295 | schedule_timeout_idle(rtp->gp_sleep); |
eacd6f04 PM |
296 | } |
297 | } | |
298 | ||
1b04fa99 | 299 | /* Spawn RCU-tasks grace-period kthread. */ |
5873b8a9 | 300 | static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) |
eacd6f04 PM |
301 | { |
302 | struct task_struct *t; | |
303 | ||
c97d12a6 PM |
304 | t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); |
305 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) | |
5873b8a9 | 306 | return; |
eacd6f04 | 307 | smp_mb(); /* Ensure others see full kthread. */ |
eacd6f04 | 308 | } |
eacd6f04 | 309 | |
eacd6f04 PM |
310 | #ifndef CONFIG_TINY_RCU |
311 | ||
312 | /* | |
313 | * Print any non-default Tasks RCU settings. | |
314 | */ | |
315 | static void __init rcu_tasks_bootup_oddness(void) | |
316 | { | |
d5f177d3 | 317 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
eacd6f04 PM |
318 | if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) |
319 | pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); | |
d5f177d3 PM |
320 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
321 | #ifdef CONFIG_TASKS_RCU | |
322 | pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); | |
eacd6f04 | 323 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
324 | #ifdef CONFIG_TASKS_RUDE_RCU |
325 | pr_info("\tRude variant of Tasks RCU enabled.\n"); | |
326 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
327 | #ifdef CONFIG_TASKS_TRACE_RCU |
328 | pr_info("\tTracing variant of Tasks RCU enabled.\n"); | |
329 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ | |
eacd6f04 PM |
330 | } |
331 | ||
332 | #endif /* #ifndef CONFIG_TINY_RCU */ | |
5873b8a9 | 333 | |
8344496e | 334 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
335 | /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ |
336 | static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) | |
337 | { | |
cafafd67 | 338 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each... |
7e0669c3 | 339 | pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n", |
e21408ce | 340 | rtp->kname, |
7e0669c3 | 341 | tasks_gp_state_getname(rtp), data_race(rtp->gp_state), |
af051ca4 | 342 | jiffies - data_race(rtp->gp_jiffies), |
b14fb4fb | 343 | data_race(rcu_seq_current(&rtp->tasks_gp_seq)), |
7e0669c3 | 344 | data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), |
e21408ce | 345 | ".k"[!!data_race(rtp->kthread_ptr)], |
9b073de1 | 346 | ".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))], |
e21408ce PM |
347 | s); |
348 | } | |
27c0f144 | 349 | #endif // #ifndef CONFIG_TINY_RCU |
e21408ce | 350 | |
25246fc8 PM |
351 | static void exit_tasks_rcu_finish_trace(struct task_struct *t); |
352 | ||
353 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) | |
5873b8a9 | 354 | |
d01aa263 PM |
355 | //////////////////////////////////////////////////////////////////////// |
356 | // | |
357 | // Shared code between task-list-scanning variants of Tasks RCU. | |
358 | ||
359 | /* Wait for one RCU-tasks grace period. */ | |
360 | static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) | |
361 | { | |
362 | struct task_struct *g, *t; | |
363 | unsigned long lastreport; | |
364 | LIST_HEAD(holdouts); | |
365 | int fract; | |
366 | ||
af051ca4 | 367 | set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); |
d01aa263 PM |
368 | rtp->pregp_func(); |
369 | ||
370 | /* | |
371 | * There were callbacks, so we need to wait for an RCU-tasks | |
372 | * grace period. Start off by scanning the task list for tasks | |
373 | * that are not already voluntarily blocked. Mark these tasks | |
374 | * and make a list of them in holdouts. | |
375 | */ | |
af051ca4 | 376 | set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); |
d01aa263 PM |
377 | rcu_read_lock(); |
378 | for_each_process_thread(g, t) | |
379 | rtp->pertask_func(t, &holdouts); | |
380 | rcu_read_unlock(); | |
381 | ||
af051ca4 | 382 | set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); |
9796e1ae | 383 | rtp->postscan_func(&holdouts); |
d01aa263 PM |
384 | |
385 | /* | |
386 | * Each pass through the following loop scans the list of holdout | |
387 | * tasks, removing any that are no longer holdouts. When the list | |
388 | * is empty, we are done. | |
389 | */ | |
390 | lastreport = jiffies; | |
391 | ||
2393a613 PM |
392 | // Start off with the initial wait interval and slowly back off to a 1-HZ wait. |
393 | fract = rtp->init_fract; | |
d01aa263 | 394 | |
77dc1741 | 395 | while (!list_empty(&holdouts)) { |
d01aa263 PM |
396 | bool firstreport; |
397 | bool needreport; | |
398 | int rtst; | |
399 | ||
d01aa263 | 400 | /* Slowly back off waiting for holdouts */ |
af051ca4 | 401 | set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); |
75dc2da5 | 402 | schedule_timeout_idle(fract); |
d01aa263 | 403 | |
75dc2da5 PM |
404 | if (fract < HZ) |
405 | fract++; | |
d01aa263 PM |
406 | |
407 | rtst = READ_ONCE(rcu_task_stall_timeout); | |
408 | needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); | |
409 | if (needreport) | |
410 | lastreport = jiffies; | |
411 | firstreport = true; | |
412 | WARN_ON(signal_pending(current)); | |
af051ca4 | 413 | set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); |
d01aa263 PM |
414 | rtp->holdouts_func(&holdouts, needreport, &firstreport); |
415 | } | |
416 | ||
af051ca4 PM |
417 | set_tasks_gp_state(rtp, RTGS_POST_GP); |
418 | rtp->postgp_func(rtp); | |
d01aa263 PM |
419 | } |
420 | ||
25246fc8 PM |
421 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
422 | ||
423 | #ifdef CONFIG_TASKS_RCU | |
424 | ||
5873b8a9 PM |
425 | //////////////////////////////////////////////////////////////////////// |
426 | // | |
427 | // Simple variant of RCU whose quiescent states are voluntary context | |
8af9e2c7 | 428 | // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle. |
5873b8a9 PM |
429 | // As such, grace periods can take one good long time. There are no |
430 | // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() | |
431 | // because this implementation is intended to get the system into a safe | |
432 | // state for some of the manipulations involved in tracing and the like. | |
433 | // Finally, this implementation does not support high call_rcu_tasks() | |
434 | // rates from multiple CPUs. If this is required, per-CPU callback lists | |
435 | // will be needed. | |
06a3ec92 PM |
436 | // |
437 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
438 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() | |
439 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
440 | // invokes these functions in this order: | |
441 | // | |
442 | // rcu_tasks_pregp_step(): | |
443 | // Invokes synchronize_rcu() in order to wait for all in-flight | |
444 | // t->on_rq and t->nvcsw transitions to complete. This works because | |
445 | // all such transitions are carried out with interrupts disabled. | |
446 | // rcu_tasks_pertask(), invoked on every non-idle task: | |
447 | // For every runnable non-idle task other than the current one, use | |
448 | // get_task_struct() to pin down that task, snapshot that task's | |
449 | // number of voluntary context switches, and add that task to the | |
450 | // holdout list. | |
451 | // rcu_tasks_postscan(): | |
452 | // Invoke synchronize_srcu() to ensure that all tasks that were | |
453 | // in the process of exiting (and which thus might not know to | |
454 | // synchronize with this RCU Tasks grace period) have completed | |
455 | // exiting. | |
456 | // check_all_holdout_tasks(), repeatedly until holdout list is empty: | |
457 | // Scans the holdout list, attempting to identify a quiescent state | |
458 | // for each task on the list. If there is a quiescent state, the | |
459 | // corresponding task is removed from the holdout list. | |
460 | // rcu_tasks_postgp(): | |
461 | // Invokes synchronize_rcu() in order to ensure that all prior | |
462 | // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks | |
463 | // to have happened before the end of this RCU Tasks grace period. | |
464 | // Again, this works because all such transitions are carried out | |
465 | // with interrupts disabled. | |
466 | // | |
467 | // For each exiting task, the exit_tasks_rcu_start() and | |
468 | // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU | |
469 | // read-side critical sections waited for by rcu_tasks_postscan(). | |
470 | // | |
381a4f3b PM |
471 | // Pre-grace-period update-side code is ordered before the grace |
472 | // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code |
473 | // is ordered before the grace period via the synchronize_rcu() call in |
474 | // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt | |
06a3ec92 | 475 | // disabling. |
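// A minimal usage sketch, assuming a hypothetical trampoline type and
// helper names: a tracer that must not free an old trampoline until every
// task has passed through a voluntary context switch, usermode execution,
// or idle could queue the free as follows:
//
//	struct tramp {			// Hypothetical trampoline wrapper.
//		struct rcu_head rh;
//		// ... executable text ...
//	};
//
//	static void tramp_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct tramp, rh));
//	}
//
//	// After unpublishing old_tramp from all call sites:
//	call_rcu_tasks(&old_tramp->rh, tramp_free_cb);
//
// A synchronous updater can instead invoke synchronize_rcu_tasks() and
// then free the trampoline directly.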
5873b8a9 | 476 | |
e4fe5dd6 PM |
477 | /* Pre-grace-period preparation. */ |
478 | static void rcu_tasks_pregp_step(void) | |
479 | { | |
480 | /* | |
481 | * Wait for all pre-existing t->on_rq and t->nvcsw transitions | |
482 | * to complete. Invoking synchronize_rcu() suffices because all | |
483 | * these transitions occur with interrupts disabled. Without this | |
484 | * synchronize_rcu(), a read-side critical section that started | |
485 | * before the grace period might be incorrectly seen as having | |
486 | * started after the grace period. | |
487 | * | |
488 | * This synchronize_rcu() also dispenses with the need for a | |
489 | * memory barrier on the first store to t->rcu_tasks_holdout, | |
490 | * as it forces the store to happen after the beginning of the | |
491 | * grace period. | |
492 | */ | |
493 | synchronize_rcu(); | |
494 | } | |
495 | ||
496 | /* Per-task initial processing. */ | |
497 | static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) | |
498 | { | |
499 | if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { | |
500 | get_task_struct(t); | |
501 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); | |
502 | WRITE_ONCE(t->rcu_tasks_holdout, true); | |
503 | list_add(&t->rcu_tasks_holdout_list, hop); | |
504 | } | |
505 | } | |
506 | ||
507 | /* Processing between scanning the tasklist and draining the holdout list. */ |
04a3c5aa | 508 | static void rcu_tasks_postscan(struct list_head *hop) |
e4fe5dd6 PM |
509 | { |
510 | /* | |
511 | * Wait for tasks that are in the process of exiting. This | |
512 | * does only part of the job, ensuring that all tasks that were | |
513 | * previously exiting reach the point where they have disabled | |
514 | * preemption, allowing the later synchronize_rcu() to finish | |
515 | * the job. | |
516 | */ | |
517 | synchronize_srcu(&tasks_rcu_exit_srcu); | |
518 | } | |
519 | ||
5873b8a9 PM |
520 | /* See if tasks are still holding out, complain if so. */ |
521 | static void check_holdout_task(struct task_struct *t, | |
522 | bool needreport, bool *firstreport) | |
523 | { | |
524 | int cpu; | |
525 | ||
526 | if (!READ_ONCE(t->rcu_tasks_holdout) || | |
527 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || | |
528 | !READ_ONCE(t->on_rq) || | |
529 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && | |
530 | !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { | |
531 | WRITE_ONCE(t->rcu_tasks_holdout, false); | |
532 | list_del_init(&t->rcu_tasks_holdout_list); | |
533 | put_task_struct(t); | |
534 | return; | |
535 | } | |
536 | rcu_request_urgent_qs_task(t); | |
537 | if (!needreport) | |
538 | return; | |
539 | if (*firstreport) { | |
540 | pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); | |
541 | *firstreport = false; | |
542 | } | |
543 | cpu = task_cpu(t); | |
544 | pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", | |
545 | t, ".I"[is_idle_task(t)], | |
546 | "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], | |
547 | t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, | |
548 | t->rcu_tasks_idle_cpu, cpu); | |
549 | sched_show_task(t); | |
550 | } | |
551 | ||
e4fe5dd6 PM |
552 | /* Scan the holdout lists for tasks no longer holding out. */ |
553 | static void check_all_holdout_tasks(struct list_head *hop, | |
554 | bool needreport, bool *firstreport) | |
555 | { | |
556 | struct task_struct *t, *t1; | |
557 | ||
558 | list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { | |
559 | check_holdout_task(t, needreport, firstreport); | |
560 | cond_resched(); | |
561 | } | |
562 | } | |
563 | ||
564 | /* Finish off the Tasks-RCU grace period. */ | |
af051ca4 | 565 | static void rcu_tasks_postgp(struct rcu_tasks *rtp) |
e4fe5dd6 PM |
566 | { |
567 | /* | |
568 | * Because ->on_rq and ->nvcsw are not guaranteed to have a full | |
569 | * memory barriers prior to them in the schedule() path, memory | |
570 | * reordering on other CPUs could cause their RCU-tasks read-side | |
571 | * critical sections to extend past the end of the grace period. | |
572 | * However, because these ->nvcsw updates are carried out with | |
573 | * interrupts disabled, we can use synchronize_rcu() to force the | |
574 | * needed ordering on all such CPUs. | |
575 | * | |
576 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout | |
577 | * accesses to be within the grace period, avoiding the need for | |
578 | * memory barriers for ->rcu_tasks_holdout accesses. | |
579 | * | |
580 | * In addition, this synchronize_rcu() waits for exiting tasks | |
581 | * to complete their final preempt_disable() region of execution, | |
582 | * cleaning up after the synchronize_srcu() above. | |
583 | */ | |
584 | synchronize_rcu(); | |
585 | } | |
586 | ||
5873b8a9 | 587 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
c97d12a6 | 588 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); |
5873b8a9 PM |
589 | |
590 | /** | |
591 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period |
592 | * @rhp: structure to be used for queueing the RCU updates. | |
593 | * @func: actual callback function to be invoked after the grace period | |
594 | * | |
595 | * The callback function will be invoked some time after a full grace | |
596 | * period elapses, in other words after all currently executing RCU | |
597 | * read-side critical sections have completed. call_rcu_tasks() assumes | |
598 | * that the read-side critical sections end at a voluntary context | |
8af9e2c7 | 599 | * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, |
5873b8a9 PM |
600 | * or transition to usermode execution. As such, there are no read-side |
601 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
602 | * this primitive is intended to determine that all tasks have passed | |
a616aec9 | 603 | * through a safe state, not so much for data-structure synchronization. |
5873b8a9 PM |
604 | * |
605 | * See the description of call_rcu() for more detailed information on | |
606 | * memory ordering guarantees. | |
607 | */ | |
608 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) | |
609 | { | |
610 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); | |
611 | } | |
612 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | |
613 | ||
614 | /** | |
615 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. | |
616 | * | |
617 | * Control will return to the caller some time after a full rcu-tasks | |
618 | * grace period has elapsed, in other words after all currently | |
619 | * executing rcu-tasks read-side critical sections have completed. These |
620 | * read-side critical sections are delimited by calls to schedule(), | |
621 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls | |
622 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). | |
623 | * | |
624 | * This is a very specialized primitive, intended only for a few uses in | |
625 | * tracing and other situations requiring manipulation of function | |
626 | * preambles and profiling hooks. The synchronize_rcu_tasks() function | |
627 | * is not (yet) intended for heavy use from multiple CPUs. | |
628 | * | |
629 | * See the description of synchronize_rcu() for more detailed information | |
630 | * on memory ordering guarantees. | |
631 | */ | |
632 | void synchronize_rcu_tasks(void) | |
633 | { | |
634 | synchronize_rcu_tasks_generic(&rcu_tasks); | |
635 | } | |
636 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); | |
637 | ||
638 | /** | |
639 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. | |
640 | * | |
641 | * Although the current implementation is guaranteed to wait, it is not | |
642 | * obligated to, for example, if there are no pending callbacks. | |
643 | */ | |
644 | void rcu_barrier_tasks(void) | |
645 | { | |
646 | /* There is only one callback queue, so this is easy. ;-) */ | |
647 | synchronize_rcu_tasks(); | |
648 | } | |
649 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); | |
650 | ||
651 | static int __init rcu_spawn_tasks_kthread(void) | |
652 | { | |
cafafd67 | 653 | cblist_init_generic(&rcu_tasks); |
4fe192df | 654 | rcu_tasks.gp_sleep = HZ / 10; |
75dc2da5 | 655 | rcu_tasks.init_fract = HZ / 10; |
e4fe5dd6 PM |
656 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
657 | rcu_tasks.pertask_func = rcu_tasks_pertask; | |
658 | rcu_tasks.postscan_func = rcu_tasks_postscan; | |
659 | rcu_tasks.holdouts_func = check_all_holdout_tasks; | |
660 | rcu_tasks.postgp_func = rcu_tasks_postgp; | |
5873b8a9 PM |
661 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
662 | return 0; | |
663 | } | |
5873b8a9 | 664 | |
27c0f144 PM |
665 | #if !defined(CONFIG_TINY_RCU) |
666 | void show_rcu_tasks_classic_gp_kthread(void) | |
e21408ce PM |
667 | { |
668 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); | |
669 | } | |
27c0f144 PM |
670 | EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); |
671 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 672 | |
25246fc8 PM |
673 | /* Do the srcu_read_lock() for the above synchronize_srcu(). */ |
674 | void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) | |
675 | { | |
676 | preempt_disable(); | |
677 | current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); | |
678 | preempt_enable(); | |
679 | } | |
680 | ||
681 | /* Do the srcu_read_unlock() for the above synchronize_srcu(). */ | |
682 | void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) | |
683 | { | |
684 | struct task_struct *t = current; | |
685 | ||
686 | preempt_disable(); | |
687 | __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); | |
688 | preempt_enable(); | |
689 | exit_tasks_rcu_finish_trace(t); | |
690 | } | |
691 | ||
e21408ce | 692 | #else /* #ifdef CONFIG_TASKS_RCU */ |
25246fc8 PM |
693 | void exit_tasks_rcu_start(void) { } |
694 | void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } | |
e21408ce | 695 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
696 | |
697 | #ifdef CONFIG_TASKS_RUDE_RCU | |
698 | ||
699 | //////////////////////////////////////////////////////////////////////// | |
700 | // | |
701 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of | |
702 | // passing an empty function to schedule_on_each_cpu(). This approach | |
e4be1f44 PM |
703 | // provides an asynchronous call_rcu_tasks_rude() API and batching of |
704 | // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. | |
9fc98e31 PM |
705 | // This invokes schedule_on_each_cpu() in order to send IPIs far and wide |
706 | // and induces otherwise unnecessary context switches on all online CPUs, | |
707 | // whether idle or not. | |
708 | // | |
709 | // Callback handling is provided by the rcu_tasks_kthread() function. | |
710 | // | |
711 | // Ordering is provided by the scheduler's context-switch code. | |
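// A usage sketch with hypothetical helpers: an updater that must wait out
// even preemption-disabled regions of code might do:
//
//	unpublish_old_code(oc);		// Hypothetical: remove all references.
//	synchronize_rcu_tasks_rude();	// Every online CPU has context-switched.
//	free_old_code(oc);		// Hypothetical: now safe to free.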
c84aad76 PM |
712 | |
713 | // Empty function to allow workqueues to force a context switch. | |
714 | static void rcu_tasks_be_rude(struct work_struct *work) | |
715 | { | |
716 | } | |
717 | ||
718 | // Wait for one rude RCU-tasks grace period. | |
719 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) | |
720 | { | |
238dbce3 | 721 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
c84aad76 PM |
722 | schedule_on_each_cpu(rcu_tasks_be_rude); |
723 | } | |
724 | ||
725 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); | |
c97d12a6 PM |
726 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
727 | "RCU Tasks Rude"); | |
c84aad76 PM |
728 | |
729 | /** | |
730 | * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period |
731 | * @rhp: structure to be used for queueing the RCU updates. | |
732 | * @func: actual callback function to be invoked after the grace period | |
733 | * | |
734 | * The callback function will be invoked some time after a full grace | |
735 | * period elapses, in other words after all currently executing RCU | |
736 | * read-side critical sections have completed. call_rcu_tasks_rude() | |
737 | * assumes that the read-side critical sections end at context switch, | |
8af9e2c7 | 738 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
a6517e9c NU |
739 | * usermode execution is schedulable). As such, there are no read-side |
740 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
741 | * this primitive is intended to determine that all tasks have passed | |
742 | * through a safe state, not so much for data-structure synchronization. | |
c84aad76 PM |
743 | * |
744 | * See the description of call_rcu() for more detailed information on | |
745 | * memory ordering guarantees. | |
746 | */ | |
747 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) | |
748 | { | |
749 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); | |
750 | } | |
751 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); | |
752 | ||
753 | /** | |
754 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period | |
755 | * | |
756 | * Control will return to the caller some time after a rude rcu-tasks | |
757 | * grace period has elapsed, in other words after all currently | |
758 | * executing rcu-tasks read-side critical sections have completed. These |
759 | * read-side critical sections are delimited by calls to schedule(), | |
a6517e9c NU |
760 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
761 | * context), and (in theory, anyway) cond_resched(). | |
c84aad76 PM |
762 | * |
763 | * This is a very specialized primitive, intended only for a few uses in | |
764 | * tracing and other situations requiring manipulation of function preambles | |
765 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not | |
766 | * (yet) intended for heavy use from multiple CPUs. | |
767 | * | |
768 | * See the description of synchronize_rcu() for more detailed information | |
769 | * on memory ordering guarantees. | |
770 | */ | |
771 | void synchronize_rcu_tasks_rude(void) | |
772 | { | |
773 | synchronize_rcu_tasks_generic(&rcu_tasks_rude); | |
774 | } | |
775 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); | |
776 | ||
777 | /** | |
778 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. | |
779 | * | |
780 | * Although the current implementation is guaranteed to wait, it is not | |
781 | * obligated to, for example, if there are no pending callbacks. | |
782 | */ | |
783 | void rcu_barrier_tasks_rude(void) | |
784 | { | |
785 | /* There is only one callback queue, so this is easy. ;-) */ | |
786 | synchronize_rcu_tasks_rude(); | |
787 | } | |
788 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); | |
789 | ||
790 | static int __init rcu_spawn_tasks_rude_kthread(void) | |
791 | { | |
cafafd67 | 792 | cblist_init_generic(&rcu_tasks_rude); |
4fe192df | 793 | rcu_tasks_rude.gp_sleep = HZ / 10; |
c84aad76 PM |
794 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); |
795 | return 0; | |
796 | } | |
c84aad76 | 797 | |
27c0f144 PM |
798 | #if !defined(CONFIG_TINY_RCU) |
799 | void show_rcu_tasks_rude_gp_kthread(void) | |
e21408ce PM |
800 | { |
801 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); | |
802 | } | |
27c0f144 PM |
803 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
804 | #endif // !defined(CONFIG_TINY_RCU) | |
805 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
806 | |
807 | //////////////////////////////////////////////////////////////////////// | |
808 | // | |
809 | // Tracing variant of Tasks RCU. This variant is designed to be used | |
810 | // to protect tracing hooks, including those of BPF. This variant | |
811 | // therefore: | |
812 | // | |
813 | // 1. Has explicit read-side markers to allow finite grace periods | |
814 | // in the face of in-kernel loops for PREEMPT=n builds. | |
815 | // | |
816 | // 2. Protects code in the idle loop, exception entry/exit, and | |
817 | // CPU-hotplug code paths, similar to the capabilities of SRCU. | |
818 | // | |
c4f113ac | 819 | // 3. Avoids expensive read-side instructions, having overhead similar |
d5f177d3 PM |
820 | // to that of Preemptible RCU. |
821 | // | |
822 | // There are of course downsides. The grace-period code can send IPIs to | |
823 | // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace. | |
824 | // It is necessary to scan the full tasklist, much as for Tasks RCU. There | |
825 | // is a single callback queue guarded by a single lock, again, much as for | |
826 | // Tasks RCU. If needed, these downsides can be at least partially remedied. | |
827 | // | |
828 | // Perhaps most important, this variant of RCU does not affect the vanilla | |
829 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace | |
830 | // readers can operate from idle, offline, and exception entry/exit in no | |
831 | // way allows rcu_preempt and rcu_sched readers to also do so. | |
a434dd10 PM |
832 | // |
833 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
834 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() | |
835 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
836 | // invokes these functions in this order: | |
837 | // | |
838 | // rcu_tasks_trace_pregp_step(): | |
839 | // Initialize the count of readers and block CPU-hotplug operations. | |
840 | // rcu_tasks_trace_pertask(), invoked on every non-idle task: | |
841 | // Initialize per-task state and attempt to identify an immediate | |
842 | // quiescent state for that task, or, failing that, attempt to | |
843 | // set that task's .need_qs flag so that task's next outermost | |
844 | // rcu_read_unlock_trace() will report the quiescent state (in which | |
845 | // case the count of readers is incremented). If both attempts fail, | |
45f4b4a2 PM |
846 | // the task is added to a "holdout" list. Note that IPIs are used |
847 | // to invoke trc_read_check_handler() in the context of running tasks | |
848 | // in order to avoid ordering overhead on common-case shared-variable | |
849 | // accesses. |
a434dd10 PM |
850 | // rcu_tasks_trace_postscan(): |
851 | // Initialize state and attempt to identify an immediate quiescent | |
852 | // state as above (but only for idle tasks), unblock CPU-hotplug | |
853 | // operations, and wait for an RCU grace period to avoid races with | |
854 | // tasks that are in the process of exiting. | |
855 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: | |
856 | // Scans the holdout list, attempting to identify a quiescent state | |
857 | // for each task on the list. If there is a quiescent state, the | |
858 | // corresponding task is removed from the holdout list. | |
859 | // rcu_tasks_trace_postgp(): | |
860 | // Wait for the count of readers to drop to zero, reporting any stalls. |
861 | // Also execute full memory barriers to maintain ordering with code | |
862 | // executing after the grace period. | |
863 | // | |
864 | // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. | |
865 | // | |
866 | // Pre-grace-period update-side code is ordered before the grace | |
867 | // period via the ->cbs_lock and barriers in rcu_tasks_kthread(). | |
868 | // Pre-grace-period read-side code is ordered before the grace period by | |
869 | // atomic_dec_and_test() of the count of readers (for IPIed readers) and by | |
870 | // scheduler context-switch ordering (for locked-down non-running readers). | |
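// A reader-side sketch: unlike the other two flavors, RCU Tasks Trace has
// explicit read-side markers, declared in include/linux/rcupdate_trace.h.
// The hook pointer and its type here are hypothetical:
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference_raw(hook_ptr);
//	if (hook)
//		hook->func(hook->arg);
//	rcu_read_unlock_trace();
//
// The updater then uses call_rcu_tasks_trace() or
// synchronize_rcu_tasks_trace() to wait for all such critical sections,
// including those running in idle, exception entry/exit, or CPU-hotplug
// code paths.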
d5f177d3 PM |
871 | |
872 | // The lockdep state must be outside of #ifdef to be useful. | |
873 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
874 | static struct lock_class_key rcu_lock_trace_key; | |
875 | struct lockdep_map rcu_trace_lock_map = | |
876 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); | |
877 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); | |
878 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
879 | ||
880 | #ifdef CONFIG_TASKS_TRACE_RCU | |
881 | ||
30d8aa51 PM |
882 | static atomic_t trc_n_readers_need_end; // Number of waited-for readers. |
883 | static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // GP kthread waits here for readers. |
d5f177d3 PM |
884 | |
885 | // Record outstanding IPIs to each CPU. No point in sending two... | |
886 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); | |
887 | ||
40471509 PM |
888 | // The number of detections of task quiescent state relying on |
889 | // heavyweight readers executing explicit memory barriers. | |
6731da9e PM |
890 | static unsigned long n_heavy_reader_attempts; |
891 | static unsigned long n_heavy_reader_updates; | |
892 | static unsigned long n_heavy_reader_ofl_updates; | |
40471509 | 893 | |
b0afa0f0 PM |
894 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
895 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, | |
896 | "RCU Tasks Trace"); | |
897 | ||
b38f57c1 PM |
898 | /* |
899 | * This irq_work handler allows rcu_read_unlock_trace() to be invoked | |
900 | * while the scheduler locks are held. | |
901 | */ | |
902 | static void rcu_read_unlock_iw(struct irq_work *iwp) | |
903 | { | |
904 | wake_up(&trc_wait); | |
905 | } | |
906 | static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw); | |
907 | ||
d5f177d3 | 908 | /* If we are the last reader, wake up the grace-period kthread. */ |
a5c071cc | 909 | void rcu_read_unlock_trace_special(struct task_struct *t) |
d5f177d3 | 910 | { |
f8ab3fad | 911 | int nq = READ_ONCE(t->trc_reader_special.b.need_qs); |
276c4104 | 912 | |
9ae58d7b PM |
913 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && |
914 | t->trc_reader_special.b.need_mb) | |
276c4104 PM |
915 | smp_mb(); // Pairs with update-side barriers. |
916 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. | |
917 | if (nq) | |
918 | WRITE_ONCE(t->trc_reader_special.b.need_qs, false); | |
a5c071cc | 919 | WRITE_ONCE(t->trc_reader_nesting, 0); |
276c4104 | 920 | if (nq && atomic_dec_and_test(&trc_n_readers_need_end)) |
b38f57c1 | 921 | irq_work_queue(&rcu_tasks_trace_iw); |
d5f177d3 PM |
922 | } |
923 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); | |
924 | ||
925 | /* Add a task to the holdout list, if it is not already on the list. */ | |
926 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) | |
927 | { | |
928 | if (list_empty(&t->trc_holdout_list)) { | |
929 | get_task_struct(t); | |
930 | list_add(&t->trc_holdout_list, bhp); | |
931 | } | |
932 | } | |
933 | ||
934 | /* Remove a task from the holdout list, if it is in fact present. */ | |
935 | static void trc_del_holdout(struct task_struct *t) | |
936 | { | |
937 | if (!list_empty(&t->trc_holdout_list)) { | |
938 | list_del_init(&t->trc_holdout_list); | |
939 | put_task_struct(t); | |
940 | } | |
941 | } | |
942 | ||
943 | /* IPI handler to check task state. */ | |
944 | static void trc_read_check_handler(void *t_in) | |
945 | { | |
946 | struct task_struct *t = current; | |
947 | struct task_struct *texp = t_in; | |
948 | ||
949 | // If the task is no longer running on this CPU, leave. | |
950 | if (unlikely(texp != t)) { | |
d5f177d3 PM |
951 | goto reset_ipi; // Already on holdout list, so will check later. |
952 | } | |
953 | ||
954 | // If the task is not in a read-side critical section, and | |
955 | // if this is the last reader, awaken the grace-period kthread. | |
bdb0cca0 | 956 | if (likely(!READ_ONCE(t->trc_reader_nesting))) { |
d5f177d3 PM |
957 | WRITE_ONCE(t->trc_reader_checked, true); |
958 | goto reset_ipi; | |
959 | } | |
ba3a86e4 | 960 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
96017bf9 | 961 | if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) |
ba3a86e4 | 962 | goto reset_ipi; |
d5f177d3 PM |
963 | WRITE_ONCE(t->trc_reader_checked, true); |
964 | ||
965 | // Get here if the task is in a read-side critical section. Set | |
966 | // its state so that it will awaken the grace-period kthread upon | |
967 | // exit from that critical section. | |
96017bf9 | 968 | atomic_inc(&trc_n_readers_need_end); // One more to wait on. |
f8ab3fad | 969 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); |
276c4104 | 970 | WRITE_ONCE(t->trc_reader_special.b.need_qs, true); |
d5f177d3 PM |
971 | |
972 | reset_ipi: | |
973 | // Allow future IPIs to be sent on CPU and for task. | |
974 | // Also order this IPI handler against any later manipulations of | |
975 | // the intended task. | |
8211e922 | 976 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
d5f177d3 PM |
977 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
978 | } | |
979 | ||
980 | /* Callback function for scheduler to check locked-down task. */ | |
9b3c4ab3 | 981 | static int trc_inspect_reader(struct task_struct *t, void *arg) |
d5f177d3 | 982 | { |
7d0c9c50 | 983 | int cpu = task_cpu(t); |
18f08e75 | 984 | int nesting; |
7e3b70e0 | 985 | bool ofl = cpu_is_offline(cpu); |
7d0c9c50 PM |
986 | |
987 | if (task_curr(t)) { | |
30d8aa51 | 988 | WARN_ON_ONCE(ofl && !is_idle_task(t)); |
7e3b70e0 | 989 | |
7d0c9c50 | 990 | // If no chance of heavyweight readers, do it the hard way. |
7e3b70e0 | 991 | if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
9b3c4ab3 | 992 | return -EINVAL; |
7d0c9c50 PM |
993 | |
994 | // If heavyweight readers are enabled on the remote task, | |
995 | // we can inspect its state even though it is currently running. |
996 | // However, we cannot safely change its state. | |
40471509 | 997 | n_heavy_reader_attempts++; |
7e3b70e0 PM |
998 | if (!ofl && // Check for "running" idle tasks on offline CPUs. |
999 | !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) | |
9b3c4ab3 | 1000 | return -EINVAL; // No quiescent state, do it the hard way. |
40471509 | 1001 | n_heavy_reader_updates++; |
edf3775f PM |
1002 | if (ofl) |
1003 | n_heavy_reader_ofl_updates++; | |
18f08e75 | 1004 | nesting = 0; |
7d0c9c50 | 1005 | } else { |
bdb0cca0 | 1006 | // The task is not running, so C-language access is safe. |
18f08e75 | 1007 | nesting = t->trc_reader_nesting; |
7d0c9c50 | 1008 | } |
d5f177d3 | 1009 | |
18f08e75 PM |
1010 | // If not exiting a read-side critical section, mark as checked |
1011 | // so that the grace-period kthread will remove it from the | |
1012 | // holdout list. | |
1013 | t->trc_reader_checked = nesting >= 0; | |
1014 | if (nesting <= 0) | |
6fedc280 | 1015 | return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later. |
7d0c9c50 PM |
1016 | |
1017 | // The task is in a read-side critical section, so set up its | |
1018 | // state so that it will awaken the grace-period kthread upon exit | |
1019 | // from that critical section. | |
1020 | atomic_inc(&trc_n_readers_need_end); // One more to wait on. | |
f8ab3fad | 1021 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); |
7d0c9c50 | 1022 | WRITE_ONCE(t->trc_reader_special.b.need_qs, true); |
9b3c4ab3 | 1023 | return 0; |
d5f177d3 PM |
1024 | } |
1025 | ||
1026 | /* Attempt to extract the state for the specified task. */ | |
1027 | static void trc_wait_for_one_reader(struct task_struct *t, | |
1028 | struct list_head *bhp) | |
1029 | { | |
1030 | int cpu; | |
1031 | ||
1032 | // If a previous IPI is still in flight, let it complete. | |
1033 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI | |
1034 | return; | |
1035 | ||
1036 | // The current task had better be in a quiescent state. | |
1037 | if (t == current) { | |
1038 | t->trc_reader_checked = true; | |
bdb0cca0 | 1039 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
d5f177d3 PM |
1040 | return; |
1041 | } | |
1042 | ||
1043 | // Attempt to nail down the task for inspection. | |
1044 | get_task_struct(t); | |
9b3c4ab3 | 1045 | if (!task_call_func(t, trc_inspect_reader, NULL)) { |
d5f177d3 PM |
1046 | put_task_struct(t); |
1047 | return; | |
1048 | } | |
1049 | put_task_struct(t); | |
1050 | ||
45f4b4a2 PM |
1051 | // If this task is not yet on the holdout list, then we are in |
1052 | // an RCU read-side critical section. Otherwise, the invocation of | |
d0a85858 | 1053 | // trc_add_holdout() that added it to the list did the necessary |
45f4b4a2 PM |
1054 | // get_task_struct(). Either way, the task cannot be freed out |
1055 | // from under this code. | |
1056 | ||
d5f177d3 PM |
1057 | // If currently running, send an IPI, either way, add to list. |
1058 | trc_add_holdout(t, bhp); | |
574de876 PM |
1059 | if (task_curr(t) && |
1060 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { | |
d5f177d3 PM |
1061 | // The task is currently running, so try IPIing it. |
1062 | cpu = task_cpu(t); | |
1063 | ||
1064 | // If there is already an IPI outstanding, let it happen. | |
1065 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) | |
1066 | return; | |
1067 | ||
d5f177d3 PM |
1068 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
1069 | t->trc_ipi_to_cpu = cpu; | |
238dbce3 | 1070 | rcu_tasks_trace.n_ipis++; |
96017bf9 | 1071 | if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { |
d5f177d3 PM |
1072 | // Just in case there is some other reason for |
1073 | // failure than the target CPU being offline. | |
46aa886c NU |
1074 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", |
1075 | __func__, cpu); | |
7e0669c3 | 1076 | rcu_tasks_trace.n_ipis_fails++; |
d5f177d3 | 1077 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
46aa886c | 1078 | t->trc_ipi_to_cpu = -1; |
d5f177d3 PM |
1079 | } |
1080 | } | |
1081 | } | |
1082 | ||
1083 | /* Initialize for a new RCU-tasks-trace grace period. */ | |
1084 | static void rcu_tasks_trace_pregp_step(void) | |
1085 | { | |
1086 | int cpu; | |
1087 | ||
d5f177d3 PM |
1088 | // Allow for fast-acting IPIs. |
1089 | atomic_set(&trc_n_readers_need_end, 1); | |
1090 | ||
1091 | // There shouldn't be any old IPIs, but... | |
1092 | for_each_possible_cpu(cpu) | |
1093 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); | |
81b4a7bc PM |
1094 | |
1095 | // Disable CPU hotplug across the tasklist scan. | |
1096 | // This also waits for all readers in CPU-hotplug code paths. | |
1097 | cpus_read_lock(); | |
d5f177d3 PM |
1098 | } |
1099 | ||
1100 | /* Do first-round processing for the specified task. */ | |
1101 | static void rcu_tasks_trace_pertask(struct task_struct *t, | |
1102 | struct list_head *hop) | |
1103 | { | |
1b04fa99 URS |
1104 | // During early boot when there is only the one boot CPU, there |
1105 | // is no idle task for the other CPUs. Just return. | |
1106 | if (unlikely(t == NULL)) | |
1107 | return; | |
1108 | ||
276c4104 | 1109 | WRITE_ONCE(t->trc_reader_special.b.need_qs, false); |
43766c3e | 1110 | WRITE_ONCE(t->trc_reader_checked, false); |
d5f177d3 PM |
1111 | t->trc_ipi_to_cpu = -1; |
1112 | trc_wait_for_one_reader(t, hop); | |
1113 | } | |
1114 | ||
9796e1ae PM |
1115 | /* |
1116 | * Do intermediate processing between task and holdout scans and | |
1117 | * pick up the idle tasks. | |
1118 | */ | |
1119 | static void rcu_tasks_trace_postscan(struct list_head *hop) | |
d5f177d3 | 1120 | { |
9796e1ae PM |
1121 | int cpu; |
1122 | ||
1123 | for_each_possible_cpu(cpu) | |
1124 | rcu_tasks_trace_pertask(idle_task(cpu), hop); | |
1125 | ||
81b4a7bc PM |
1126 | // Re-enable CPU hotplug now that the tasklist scan has completed. |
1127 | cpus_read_unlock(); | |
1128 | ||
d5f177d3 PM |
1129 | // Wait for late-stage exiting tasks to finish exiting. |
1130 | // These might have passed the call to exit_tasks_rcu_finish(). | |
1131 | synchronize_rcu(); | |
1132 | // Any tasks that exit after this point will set ->trc_reader_checked. | |
1133 | } | |
1134 | ||
4593e772 PM |
1135 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
1136 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) | |
1137 | { | |
1138 | int cpu; | |
1139 | ||
1140 | if (*firstreport) { | |
1141 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); | |
1142 | *firstreport = false; | |
1143 | } | |
1144 | // FIXME: This should attempt to use try_invoke_on_nonrunning_task(). | |
1145 | cpu = task_cpu(t); | |
1146 | pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", | |
1147 | t->pid, | |
d39ec8f3 | 1148 | ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0], |
4593e772 | 1149 | ".i"[is_idle_task(t)], |
d39ec8f3 | 1150 | ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], |
bdb0cca0 | 1151 | READ_ONCE(t->trc_reader_nesting), |
f8ab3fad | 1152 | " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)], |
4593e772 PM |
1153 | cpu); |
1154 | sched_show_task(t); | |
1155 | } | |
1156 | ||
1157 | /* List stalled IPIs for RCU tasks trace. */ | |
1158 | static void show_stalled_ipi_trace(void) | |
1159 | { | |
1160 | int cpu; | |
1161 | ||
1162 | for_each_possible_cpu(cpu) | |
1163 | if (per_cpu(trc_ipi_to_cpu, cpu)) | |
1164 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); | |
1165 | } | |
1166 | ||
d5f177d3 PM |
1167 | /* Do one scan of the holdout list. */ |
1168 | static void check_all_holdout_tasks_trace(struct list_head *hop, | |
4593e772 | 1169 | bool needreport, bool *firstreport) |
d5f177d3 PM |
1170 | { |
1171 | struct task_struct *g, *t; | |
1172 | ||
81b4a7bc PM |
1173 | // Disable CPU hotplug across the holdout list scan. |
1174 | cpus_read_lock(); | |
1175 | ||
d5f177d3 PM |
1176 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
1177 | // If safe and needed, try to check the current task. | |
1178 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && | |
1179 | !READ_ONCE(t->trc_reader_checked)) | |
1180 | trc_wait_for_one_reader(t, hop); | |
1181 | ||
1182 | // If check succeeded, remove this task from the list. | |
f5dbc594 PM |
1183 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
1184 | READ_ONCE(t->trc_reader_checked)) | |
d5f177d3 | 1185 | trc_del_holdout(t); |
4593e772 PM |
1186 | else if (needreport) |
1187 | show_stalled_task_trace(t, firstreport); | |
1188 | } | |
81b4a7bc PM |
1189 | |
1190 | // Re-enable CPU hotplug now that the holdout list scan has completed. | |
1191 | cpus_read_unlock(); | |
1192 | ||
4593e772 | 1193 | if (needreport) { |
89401176 | 1194 | if (*firstreport) |
4593e772 PM |
1195 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); |
1196 | show_stalled_ipi_trace(); | |
d5f177d3 PM |
1197 | } |
1198 | } | |
1199 | ||
cbe0d8d9 PM |
1200 | static void rcu_tasks_trace_empty_fn(void *unused) |
1201 | { | |
1202 | } | |
1203 | ||
d5f177d3 | 1204 | /* Wait for grace period to complete and provide ordering. */ |
af051ca4 | 1205 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
d5f177d3 | 1206 | { |
cbe0d8d9 | 1207 | int cpu; |
4593e772 PM |
1208 | bool firstreport; |
1209 | struct task_struct *g, *t; | |
1210 | LIST_HEAD(holdouts); | |
1211 | long ret; | |
1212 | ||
cbe0d8d9 PM |
1213 | // Wait for any lingering IPI handlers to complete. Note that |
1214 | // if a CPU has gone offline or transitioned to userspace in the | |
1215 | // meantime, all IPI handlers should have been drained beforehand. | |
1216 | // Yes, this assumes that CPUs process IPIs in order. If that ever | |
1217 | // changes, there will need to be a recheck and/or timed wait. | |
1218 | for_each_online_cpu(cpu) | |
f5dbc594 | 1219 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
cbe0d8d9 PM |
1220 | smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); |
1221 | ||
d5f177d3 PM |
1222 | // Remove the safety count. |
1223 | smp_mb__before_atomic(); // Order vs. earlier atomics | |
1224 | atomic_dec(&trc_n_readers_need_end); | |
1225 | smp_mb__after_atomic(); // Order vs. later atomics | |
1226 | ||
1227 | // Wait for readers. | |
af051ca4 | 1228 | set_tasks_gp_state(rtp, RTGS_WAIT_READERS); |
4593e772 PM |
1229 | for (;;) { |
1230 | ret = wait_event_idle_exclusive_timeout( | |
1231 | trc_wait, | |
1232 | atomic_read(&trc_n_readers_need_end) == 0, | |
1233 | READ_ONCE(rcu_task_stall_timeout)); | |
1234 | if (ret) | |
1235 | break; // Count reached zero. | |
af051ca4 | 1236 | // Stall warning time, so make a list of the offenders. |
f747c7e1 | 1237 | rcu_read_lock(); |
4593e772 | 1238 | for_each_process_thread(g, t) |
276c4104 | 1239 | if (READ_ONCE(t->trc_reader_special.b.need_qs)) |
4593e772 | 1240 | trc_add_holdout(t, &holdouts); |
f747c7e1 | 1241 | rcu_read_unlock(); |
4593e772 | 1242 | firstreport = true; |
592031cc PM |
1243 | list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) { |
1244 | if (READ_ONCE(t->trc_reader_special.b.need_qs)) | |
4593e772 | 1245 | show_stalled_task_trace(t, &firstreport); |
592031cc PM |
1246 | trc_del_holdout(t); // Release task_struct reference. |
1247 | } | |
4593e772 PM |
1248 | if (firstreport) |
1249 | pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n"); | |
1250 | show_stalled_ipi_trace(); | |
1251 | pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end)); | |
1252 | } | |
d5f177d3 | 1253 | smp_mb(); // Caller's code must be ordered after wakeup. |
43766c3e | 1254 | // Pairs with pretty much every ordering primitive. |
d5f177d3 PM |
1255 | } |
1256 | ||
1257 | /* Report any needed quiescent state for this exiting task. */ | |
25246fc8 | 1258 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
d5f177d3 PM |
1259 | { |
1260 | WRITE_ONCE(t->trc_reader_checked, true); | |
bdb0cca0 | 1261 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
d5f177d3 | 1262 | WRITE_ONCE(t->trc_reader_nesting, 0); |
276c4104 | 1263 | if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs))) |
a5c071cc | 1264 | rcu_read_unlock_trace_special(t); |
d5f177d3 PM |
1265 | } |
1266 | ||
d5f177d3 PM |
1267 | /** |
1268 | * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
1269 | * @rhp: structure to be used for queueing the RCU updates. | |
1270 | * @func: actual callback function to be invoked after the grace period | |
1271 | * | |
ed42c380 NU |
1272 | * The callback function will be invoked some time after a trace rcu-tasks |
1273 | * grace period elapses, in other words after all currently executing | |
1274 | * trace rcu-tasks read-side critical sections have completed. These | |
1275 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() | |
1276 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1277 | * |
1278 | * See the description of call_rcu() for more detailed information on | |
1279 | * memory ordering guarantees. | |
1280 | */ | |
1281 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) | |
1282 | { | |
1283 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); | |
1284 | } | |
1285 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); | |
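/*
 * Illustrative usage sketch only -- not part of this file.  It shows the
 * asynchronous update-side pattern paired with a Tasks Trace reader.  The
 * names (struct trc_node, trc_free_cb, trc_read_payload, trc_remove_node)
 * are hypothetical, and <linux/rcupdate_trace.h> plus <linux/slab.h> are
 * assumed to be included.
 */
struct trc_node {
	struct rcu_head rh;	// Queued via call_rcu_tasks_trace().
	int payload;
};

static void trc_free_cb(struct rcu_head *rhp)
{
	// Runs only after all pre-existing trace readers have finished.
	kfree(container_of(rhp, struct trc_node, rh));
}

static int trc_read_payload(struct trc_node __rcu **slot)
{
	struct trc_node *p;
	int val = -1;

	rcu_read_lock_trace();	// Begin trace rcu-tasks read-side critical section.
	p = rcu_dereference_check(*slot, rcu_read_lock_trace_held());
	if (p)
		val = p->payload;
	rcu_read_unlock_trace();
	return val;
}

static void trc_remove_node(struct trc_node __rcu **slot)
{
	// "true" stands in for the caller's update-side lockdep condition.
	struct trc_node *p = rcu_replace_pointer(*slot, NULL, true);

	// Unpublish first, then defer the free past a grace period.
	if (p)
		call_rcu_tasks_trace(&p->rh, trc_free_cb);
}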
1286 | ||
1287 | /** | |
1288 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period | |
1289 | * | |
1290 | * Control will return to the caller some time after a trace rcu-tasks | |
c7dcf810 | 1291 | * grace period has elapsed, in other words after all currently executing |
ed42c380 | 1292 | * trace rcu-tasks read-side critical sections have completed. These read-side
c7dcf810 PM |
1293 | * critical sections are delimited by calls to rcu_read_lock_trace() |
1294 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1295 | * |
1296 | * This is a very specialized primitive, intended only for a few uses in | |
1297 | * tracing and other situations requiring manipulation of function preambles | |
1298 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not | |
1299 | * (yet) intended for heavy use from multiple CPUs. | |
1300 | * | |
1301 | * See the description of synchronize_rcu() for more detailed information | |
1302 | * on memory ordering guarantees. | |
1303 | */ | |
1304 | void synchronize_rcu_tasks_trace(void) | |
1305 | { | |
1306 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); | |
1307 | synchronize_rcu_tasks_generic(&rcu_tasks_trace); | |
1308 | } | |
1309 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); | |
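/*
 * Illustrative sketch only: the synchronous counterpart of the pattern
 * above.  A hypothetical trc_replace_node() unpublishes the old node,
 * waits for a full trace rcu-tasks grace period, and only then frees it.
 */
static void trc_replace_node(struct trc_node __rcu **slot, struct trc_node *newp)
{
	// "true" again stands in for the caller's update-side lockdep condition.
	struct trc_node *oldp = rcu_replace_pointer(*slot, newp, true);

	synchronize_rcu_tasks_trace();	// All pre-existing trace readers are now done.
	kfree(oldp);			// Safe: no reader can still hold a reference.
}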
1310 | ||
1311 | /** | |
1312 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. | |
1313 | * | |
1314 | * Although the current implementation is guaranteed to wait, it is not | |
1315 | * obligated to, for example, if there are no pending callbacks. | |
1316 | */ | |
1317 | void rcu_barrier_tasks_trace(void) | |
1318 | { | |
1319 | /* There is only one callback queue, so this is easy. ;-) */ | |
1320 | synchronize_rcu_tasks_trace(); | |
1321 | } | |
1322 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); | |
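/*
 * Illustrative sketch only: the classic module-unload pattern.  Before a
 * module's callback functions (such as the hypothetical trc_free_cb above)
 * can be unmapped, every already-queued callback must have been invoked.
 */
static void __exit trc_example_exit(void)
{
	// First stop queueing new callbacks (module-specific teardown),
	// then wait for all in-flight call_rcu_tasks_trace() callbacks.
	rcu_barrier_tasks_trace();
}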
1323 | ||
1324 | static int __init rcu_spawn_tasks_trace_kthread(void) | |
1325 | { | |
cafafd67 | 1326 | cblist_init_generic(&rcu_tasks_trace); |
2393a613 | 1327 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
4fe192df | 1328 | rcu_tasks_trace.gp_sleep = HZ / 10; |
75dc2da5 | 1329 | rcu_tasks_trace.init_fract = HZ / 10; |
2393a613 | 1330 | } else { |
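	// Note: HZ/200 is integer division, so for HZ < 200 (for example
	// HZ=100, where 100/200 == 0) the checks below clamp both values
	// to a minimum of one jiffy.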
4fe192df PM |
1331 | rcu_tasks_trace.gp_sleep = HZ / 200; |
1332 | if (rcu_tasks_trace.gp_sleep <= 0) | |
1333 | rcu_tasks_trace.gp_sleep = 1; | |
75dc2da5 | 1334 | rcu_tasks_trace.init_fract = HZ / 200; |
2393a613 PM |
1335 | if (rcu_tasks_trace.init_fract <= 0) |
1336 | rcu_tasks_trace.init_fract = 1; | |
1337 | } | |
d5f177d3 PM |
1338 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
1339 | rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask; | |
1340 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; | |
1341 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; | |
1342 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; | |
1343 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); | |
1344 | return 0; | |
1345 | } | |
d5f177d3 | 1346 | |
27c0f144 PM |
1347 | #if !defined(CONFIG_TINY_RCU) |
1348 | void show_rcu_tasks_trace_gp_kthread(void) | |
e21408ce | 1349 | { |
40471509 | 1350 | char buf[64]; |
e21408ce | 1351 | |
edf3775f PM |
1352 | sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end), |
1353 | data_race(n_heavy_reader_ofl_updates), | |
40471509 PM |
1354 | data_race(n_heavy_reader_updates), |
1355 | data_race(n_heavy_reader_attempts)); | |
e21408ce PM |
1356 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); |
1357 | } | |
27c0f144 PM |
1358 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
1359 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 1360 | |
d5f177d3 | 1361 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
25246fc8 | 1362 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
d5f177d3 | 1363 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
8fd8ca38 | 1364 | |
8344496e | 1365 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
1366 | void show_rcu_tasks_gp_kthreads(void) |
1367 | { | |
1368 | show_rcu_tasks_classic_gp_kthread(); | |
1369 | show_rcu_tasks_rude_gp_kthread(); | |
1370 | show_rcu_tasks_trace_gp_kthread(); | |
1371 | } | |
8344496e | 1372 | #endif /* #ifndef CONFIG_TINY_RCU */ |
e21408ce | 1373 | |
bfba7ed0 URS |
1374 | #ifdef CONFIG_PROVE_RCU |
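/*
 * Boot-time self-test machinery: one descriptor per flavor.  The ->notrun
 * flag starts out true for flavors that are compiled out (nothing to
 * verify) and is set true by the test callback when it is invoked, so a
 * value of false at late_initcall() time means the callback never ran.
 */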
1375 | struct rcu_tasks_test_desc { | |
1376 | struct rcu_head rh; | |
1377 | const char *name; | |
1378 | bool notrun; | |
1379 | }; | |
1380 | ||
1381 | static struct rcu_tasks_test_desc tests[] = { | |
1382 | { | |
1383 | .name = "call_rcu_tasks()", | |
1384 | /* If not defined, the test is skipped. */ | |
1385 | .notrun = !IS_ENABLED(CONFIG_TASKS_RCU), | |
1386 | }, | |
1387 | { | |
1388 | .name = "call_rcu_tasks_rude()", | |
1389 | /* If not defined, the test is skipped. */ | |
1390 | .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU), | |
1391 | }, | |
1392 | { | |
1393 | .name = "call_rcu_tasks_trace()", | |
1394 | /* If not defined, the test is skipped. */ | |
1395 | .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU) | |
1396 | } | |
1397 | }; | |
1398 | ||
1399 | static void test_rcu_tasks_callback(struct rcu_head *rhp) | |
1400 | { | |
1401 | struct rcu_tasks_test_desc *rttd = | |
1402 | container_of(rhp, struct rcu_tasks_test_desc, rh); | |
1403 | ||
1404 | pr_info("Callback from %s invoked.\n", rttd->name); | |
1405 | ||
1406 | rttd->notrun = true; | |
1407 | } | |
1408 | ||
1409 | static void rcu_tasks_initiate_self_tests(void) | |
1410 | { | |
1411 | pr_info("Running RCU-tasks wait API self tests\n"); | |
1412 | #ifdef CONFIG_TASKS_RCU | |
1413 | synchronize_rcu_tasks(); | |
1414 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); | |
1415 | #endif | |
1416 | ||
1417 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1418 | synchronize_rcu_tasks_rude(); | |
1419 | call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); | |
1420 | #endif | |
1421 | ||
1422 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1423 | synchronize_rcu_tasks_trace(); | |
1424 | call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); | |
1425 | #endif | |
1426 | } | |
1427 | ||
1428 | static int rcu_tasks_verify_self_tests(void) | |
1429 | { | |
1430 | int ret = 0; | |
1431 | int i; | |
1432 | ||
1433 | for (i = 0; i < ARRAY_SIZE(tests); i++) { | |
1434 | if (!tests[i].notrun) { // Callback never ran: test still hanging.
1435 | pr_err("%s has failed.\n", tests[i].name);
1436 | ret = -1; | |
1437 | } | |
1438 | } | |
1439 | ||
1440 | WARN_ON(ret);
1442 | ||
1443 | return ret; | |
1444 | } | |
1445 | late_initcall(rcu_tasks_verify_self_tests); | |
1446 | #else /* #ifdef CONFIG_PROVE_RCU */ | |
1447 | static void rcu_tasks_initiate_self_tests(void) { } | |
1448 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | |
1449 | ||
1b04fa99 URS |
1450 | void __init rcu_init_tasks_generic(void) |
1451 | { | |
1452 | #ifdef CONFIG_TASKS_RCU | |
1453 | rcu_spawn_tasks_kthread(); | |
1454 | #endif | |
1455 | ||
1456 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1457 | rcu_spawn_tasks_rude_kthread(); | |
1458 | #endif | |
1459 | ||
1460 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1461 | rcu_spawn_tasks_trace_kthread(); | |
1462 | #endif | |
bfba7ed0 URS |
1463 | |
1464 | // Run the self-tests. | |
1465 | rcu_tasks_initiate_self_tests(); | |
1b04fa99 URS |
1466 | } |
1467 | ||
8fd8ca38 PM |
1468 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
1469 | static inline void rcu_tasks_bootup_oddness(void) {} | |
1470 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |