Commit | Line | Data |
---|---|---|
eacd6f04 PM |
1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* | |
3 | * Task-based RCU implementations. | |
4 | * | |
5 | * Copyright (C) 2020 Paul E. McKenney | |
6 | */ | |
7 | ||
8fd8ca38 | 8 | #ifdef CONFIG_TASKS_RCU_GENERIC |
5873b8a9 PM |
9 | |
10 | //////////////////////////////////////////////////////////////////////// | |
11 | // | |
12 | // Generic data structures. | |
13 | ||
14 | struct rcu_tasks; | |
15 | typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); | |
e4fe5dd6 PM |
16 | typedef void (*pregp_func_t)(void); |
17 | typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); | |
18 | typedef void (*postscan_func_t)(void); | |
19 | typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); | |
af051ca4 | 20 | typedef void (*postgp_func_t)(struct rcu_tasks *rtp); |
eacd6f04 | 21 | |
07e10515 PM |
22 | /** |
23 | * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. |
24 | * @cbs_head: Head of callback list. | |
25 | * @cbs_tail: Tail pointer for callback list. | |
26 | * @cbs_wq: Wait queue allowing new callback to get kthread's attention. |
27 | * @cbs_lock: Lock protecting callback list. | |
28 | * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. | |
5873b8a9 | 29 | * @gp_func: This flavor's grace-period-wait function. |
af051ca4 PM |
30 | * @gp_state: Grace period's most recent state transition (debugging). |
31 | * @gp_jiffies: Time of last @gp_state transition. | |
32 | * @gp_start: Most recent grace-period start in jiffies. | |
e4fe5dd6 PM |
33 | * @pregp_func: This flavor's pre-grace-period function (optional). |
34 | * @pertask_func: This flavor's per-task scan function (optional). | |
35 | * @postscan_func: This flavor's post-task scan function (optional). | |
36 | * @holdouts_func: This flavor's holdout-list scan function (optional). |
37 | * @postgp_func: This flavor's post-grace-period function (optional). | |
5873b8a9 | 38 | * @call_func: This flavor's call_rcu()-equivalent function. |
c97d12a6 PM |
39 | * @name: This flavor's textual name. |
40 | * @kname: This flavor's kthread name. | |
07e10515 PM |
41 | */ |
42 | struct rcu_tasks { | |
43 | struct rcu_head *cbs_head; | |
44 | struct rcu_head **cbs_tail; | |
45 | struct wait_queue_head cbs_wq; | |
46 | raw_spinlock_t cbs_lock; | |
af051ca4 PM |
47 | int gp_state; |
48 | unsigned long gp_jiffies; | |
88092d0c | 49 | unsigned long gp_start; |
07e10515 | 50 | struct task_struct *kthread_ptr; |
5873b8a9 | 51 | rcu_tasks_gp_func_t gp_func; |
e4fe5dd6 PM |
52 | pregp_func_t pregp_func; |
53 | pertask_func_t pertask_func; | |
54 | postscan_func_t postscan_func; | |
55 | holdouts_func_t holdouts_func; | |
56 | postgp_func_t postgp_func; | |
5873b8a9 | 57 | call_rcu_func_t call_func; |
c97d12a6 PM |
58 | char *name; |
59 | char *kname; | |
07e10515 PM |
60 | }; |
61 | ||
c97d12a6 PM |
62 | #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ |
63 | static struct rcu_tasks rt_name = \ | |
07e10515 | 64 | { \ |
c97d12a6 PM |
65 | .cbs_tail = &rt_name.cbs_head, \ |
66 | .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \ | |
67 | .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \ | |
5873b8a9 PM |
68 | .gp_func = gp, \ |
69 | .call_func = call, \ | |
c97d12a6 PM |
70 | .name = n, \ |
71 | .kname = #rt_name, \ | |
07e10515 PM |
72 | } |
73 | ||
eacd6f04 PM |
74 | /* Track exiting tasks in order to allow them to be waited for. */ |
75 | DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); | |
76 | ||
77 | /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ | |
78 | #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) | |
79 | static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; | |
80 | module_param(rcu_task_stall_timeout, int, 0644); | |
81 | ||
af051ca4 PM |
82 | /* RCU tasks grace-period state for debugging. */ |
83 | #define RTGS_INIT 0 | |
84 | #define RTGS_WAIT_WAIT_CBS 1 | |
85 | #define RTGS_WAIT_GP 2 | |
86 | #define RTGS_PRE_WAIT_GP 3 | |
87 | #define RTGS_SCAN_TASKLIST 4 | |
88 | #define RTGS_POST_SCAN_TASKLIST 5 | |
89 | #define RTGS_WAIT_SCAN_HOLDOUTS 6 | |
90 | #define RTGS_SCAN_HOLDOUTS 7 | |
91 | #define RTGS_POST_GP 8 | |
92 | #define RTGS_WAIT_READERS 9 | |
93 | #define RTGS_INVOKE_CBS 10 | |
94 | #define RTGS_WAIT_CBS 11 | |
95 | static const char * const rcu_tasks_gp_state_names[] = { | |
96 | "RTGS_INIT", | |
97 | "RTGS_WAIT_WAIT_CBS", | |
98 | "RTGS_WAIT_GP", | |
99 | "RTGS_PRE_WAIT_GP", | |
100 | "RTGS_SCAN_TASKLIST", | |
101 | "RTGS_POST_SCAN_TASKLIST", | |
102 | "RTGS_WAIT_SCAN_HOLDOUTS", | |
103 | "RTGS_SCAN_HOLDOUTS", | |
104 | "RTGS_POST_GP", | |
105 | "RTGS_WAIT_READERS", | |
106 | "RTGS_INVOKE_CBS", | |
107 | "RTGS_WAIT_CBS", | |
108 | }; | |
109 | ||
5873b8a9 PM |
110 | //////////////////////////////////////////////////////////////////////// |
111 | // | |
112 | // Generic code. | |
113 | ||
af051ca4 PM |
114 | /* Record grace-period phase and time. */ |
115 | static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) | |
116 | { | |
117 | rtp->gp_state = newstate; | |
118 | rtp->gp_jiffies = jiffies; | |
119 | } | |
120 | ||
121 | /* Return state name. */ | |
122 | static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) | |
123 | { | |
124 | int i = data_race(rtp->gp_state); // Let KCSAN detect update races | |
125 | int j = READ_ONCE(i); // Prevent the compiler from reading twice | |
126 | ||
127 | if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) | |
128 | return "???"; | |
129 | return rcu_tasks_gp_state_names[j]; | |
130 | } | |
131 | ||
5873b8a9 PM |
132 | // Enqueue a callback for the specified flavor of Tasks RCU. |
133 | static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, | |
134 | struct rcu_tasks *rtp) | |
eacd6f04 PM |
135 | { |
136 | unsigned long flags; | |
137 | bool needwake; | |
138 | ||
139 | rhp->next = NULL; | |
140 | rhp->func = func; | |
07e10515 PM |
141 | raw_spin_lock_irqsave(&rtp->cbs_lock, flags); |
142 | needwake = !rtp->cbs_head; | |
143 | WRITE_ONCE(*rtp->cbs_tail, rhp); | |
144 | rtp->cbs_tail = &rhp->next; | |
145 | raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); | |
eacd6f04 | 146 | /* We can't create the thread unless interrupts are enabled. */ |
07e10515 PM |
147 | if (needwake && READ_ONCE(rtp->kthread_ptr)) |
148 | wake_up(&rtp->cbs_wq); | |
eacd6f04 | 149 | } |
eacd6f04 | 150 | |
5873b8a9 PM |
151 | // Wait for a grace period for the specified flavor of Tasks RCU. |
152 | static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) | |
eacd6f04 PM |
153 | { |
154 | /* Complain if the scheduler has not started. */ | |
155 | RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, | |
156 | "synchronize_rcu_tasks called too soon"); | |
157 | ||
158 | /* Wait for the grace period. */ | |
5873b8a9 | 159 | wait_rcu_gp(rtp->call_func); |
eacd6f04 PM |
160 | } |
161 | ||
162 | /* RCU-tasks kthread that detects grace periods and invokes callbacks. */ | |
163 | static int __noreturn rcu_tasks_kthread(void *arg) | |
164 | { | |
165 | unsigned long flags; | |
eacd6f04 PM |
166 | struct rcu_head *list; |
167 | struct rcu_head *next; | |
07e10515 | 168 | struct rcu_tasks *rtp = arg; |
eacd6f04 PM |
169 | |
170 | /* Run on housekeeping CPUs by default. The sysadmin can move it if desired. */ |
171 | housekeeping_affine(current, HK_FLAG_RCU); | |
07e10515 | 172 | WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! |
eacd6f04 PM |
173 | |
174 | /* | |
175 | * Each pass through the following loop makes one check for | |
176 | * newly arrived callbacks, and, if there are some, waits for | |
177 | * one RCU-tasks grace period and then invokes the callbacks. | |
178 | * This loop is terminated by the system going down. ;-) | |
179 | */ | |
180 | for (;;) { | |
181 | ||
182 | /* Pick up any new callbacks. */ | |
07e10515 | 183 | raw_spin_lock_irqsave(&rtp->cbs_lock, flags); |
43766c3e | 184 | smp_mb__after_spinlock(); // Order updates vs. GP. |
07e10515 PM |
185 | list = rtp->cbs_head; |
186 | rtp->cbs_head = NULL; | |
187 | rtp->cbs_tail = &rtp->cbs_head; | |
188 | raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags); | |
eacd6f04 PM |
189 | |
190 | /* If there were none, wait a bit and start over. */ | |
191 | if (!list) { | |
07e10515 PM |
192 | wait_event_interruptible(rtp->cbs_wq, |
193 | READ_ONCE(rtp->cbs_head)); | |
194 | if (!rtp->cbs_head) { | |
eacd6f04 | 195 | WARN_ON(signal_pending(current)); |
af051ca4 | 196 | set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS); |
eacd6f04 PM |
197 | schedule_timeout_interruptible(HZ/10); |
198 | } | |
199 | continue; | |
200 | } | |
201 | ||
5873b8a9 | 202 | // Wait for one grace period. |
af051ca4 | 203 | set_tasks_gp_state(rtp, RTGS_WAIT_GP); |
88092d0c | 204 | rtp->gp_start = jiffies; |
5873b8a9 | 205 | rtp->gp_func(rtp); |
eacd6f04 PM |
206 | |
207 | /* Invoke the callbacks. */ | |
af051ca4 | 208 | set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); |
eacd6f04 PM |
209 | while (list) { |
210 | next = list->next; | |
211 | local_bh_disable(); | |
212 | list->func(list); | |
213 | local_bh_enable(); | |
214 | list = next; | |
215 | cond_resched(); | |
216 | } | |
217 | /* Paranoid sleep to keep this from entering a tight loop */ | |
218 | schedule_timeout_uninterruptible(HZ/10); | |
af051ca4 PM |
219 | |
220 | set_tasks_gp_state(rtp, RTGS_WAIT_CBS); | |
eacd6f04 PM |
221 | } |
222 | } | |
223 | ||
5873b8a9 PM |
224 | /* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */ |
225 | static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) | |
eacd6f04 PM |
226 | { |
227 | struct task_struct *t; | |
228 | ||
c97d12a6 PM |
229 | t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); |
230 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) | |
5873b8a9 | 231 | return; |
eacd6f04 | 232 | smp_mb(); /* Ensure others see full kthread. */ |
eacd6f04 | 233 | } |
eacd6f04 PM |
234 | |
235 | /* Do the srcu_read_lock() matched by the synchronize_srcu() in rcu_tasks_postscan(). */ |
236 | void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) | |
237 | { | |
238 | preempt_disable(); | |
239 | current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); | |
240 | preempt_enable(); | |
241 | } | |
242 | ||
d5f177d3 PM |
243 | static void exit_tasks_rcu_finish_trace(struct task_struct *t); |
244 | ||
eacd6f04 PM |
245 | /* Do the srcu_read_unlock() matching the srcu_read_lock() in exit_tasks_rcu_start(). */ |
246 | void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) | |
247 | { | |
d5f177d3 PM |
248 | struct task_struct *t = current; |
249 | ||
eacd6f04 | 250 | preempt_disable(); |
d5f177d3 | 251 | __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); |
eacd6f04 | 252 | preempt_enable(); |
d5f177d3 | 253 | exit_tasks_rcu_finish_trace(t); |
eacd6f04 PM |
254 | } |
255 | ||
eacd6f04 PM |
256 | #ifndef CONFIG_TINY_RCU |
257 | ||
258 | /* | |
259 | * Print any non-default Tasks RCU settings. | |
260 | */ | |
261 | static void __init rcu_tasks_bootup_oddness(void) | |
262 | { | |
d5f177d3 | 263 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
eacd6f04 PM |
264 | if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) |
265 | pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); | |
d5f177d3 PM |
266 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
267 | #ifdef CONFIG_TASKS_RCU | |
268 | pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); | |
eacd6f04 | 269 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
270 | #ifdef CONFIG_TASKS_RUDE_RCU |
271 | pr_info("\tRude variant of Tasks RCU enabled.\n"); | |
272 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
273 | #ifdef CONFIG_TASKS_TRACE_RCU |
274 | pr_info("\tTracing variant of Tasks RCU enabled.\n"); | |
275 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ | |
eacd6f04 PM |
276 | } |
277 | ||
278 | #endif /* #ifndef CONFIG_TINY_RCU */ | |
5873b8a9 | 279 | |
e21408ce PM |
280 | /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ |
281 | static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) | |
282 | { | |
af051ca4 | 283 | pr_info("%s: %s(%d) since %lu %c%c %s\n", |
e21408ce | 284 | rtp->kname, |
af051ca4 PM |
285 | tasks_gp_state_getname(rtp), |
286 | data_race(rtp->gp_state), | |
287 | jiffies - data_race(rtp->gp_jiffies), | |
e21408ce PM |
288 | ".k"[!!data_race(rtp->kthread_ptr)], |
289 | ".C"[!!data_race(rtp->cbs_head)], | |
290 | s); | |
291 | } | |
292 | ||
5873b8a9 PM |
293 | #ifdef CONFIG_TASKS_RCU |
294 | ||
d01aa263 PM |
295 | //////////////////////////////////////////////////////////////////////// |
296 | // | |
297 | // Shared code between task-list-scanning variants of Tasks RCU. | |
298 | ||
299 | /* Wait for one RCU-tasks grace period. */ | |
300 | static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) | |
301 | { | |
302 | struct task_struct *g, *t; | |
303 | unsigned long lastreport; | |
304 | LIST_HEAD(holdouts); | |
305 | int fract; | |
306 | ||
af051ca4 | 307 | set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); |
d01aa263 PM |
308 | rtp->pregp_func(); |
309 | ||
310 | /* | |
311 | * There were callbacks, so we need to wait for an RCU-tasks | |
312 | * grace period. Start off by scanning the task list for tasks | |
313 | * that are not already voluntarily blocked. Mark these tasks | |
314 | * and make a list of them in holdouts. | |
315 | */ | |
af051ca4 | 316 | set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); |
d01aa263 PM |
317 | rcu_read_lock(); |
318 | for_each_process_thread(g, t) | |
319 | rtp->pertask_func(t, &holdouts); | |
320 | rcu_read_unlock(); | |
321 | ||
af051ca4 | 322 | set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); |
d01aa263 PM |
323 | rtp->postscan_func(); |
324 | ||
325 | /* | |
326 | * Each pass through the following loop scans the list of holdout | |
327 | * tasks, removing any that are no longer holdouts. When the list | |
328 | * is empty, we are done. | |
329 | */ | |
330 | lastreport = jiffies; | |
331 | ||
332 | /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */ | |
333 | fract = 10; | |
334 | ||
335 | for (;;) { | |
336 | bool firstreport; | |
337 | bool needreport; | |
338 | int rtst; | |
339 | ||
340 | if (list_empty(&holdouts)) | |
341 | break; | |
342 | ||
343 | /* Slowly back off waiting for holdouts */ | |
af051ca4 | 344 | set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); |
d01aa263 PM |
345 | schedule_timeout_interruptible(HZ/fract); |
346 | ||
347 | if (fract > 1) | |
348 | fract--; | |
349 | ||
350 | rtst = READ_ONCE(rcu_task_stall_timeout); | |
351 | needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); | |
352 | if (needreport) | |
353 | lastreport = jiffies; | |
354 | firstreport = true; | |
355 | WARN_ON(signal_pending(current)); | |
af051ca4 | 356 | set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); |
d01aa263 PM |
357 | rtp->holdouts_func(&holdouts, needreport, &firstreport); |
358 | } | |
359 | ||
af051ca4 PM |
360 | set_tasks_gp_state(rtp, RTGS_POST_GP); |
361 | rtp->postgp_func(rtp); | |
d01aa263 PM |
362 | } |
363 | ||
5873b8a9 PM |
364 | //////////////////////////////////////////////////////////////////////// |
365 | // | |
366 | // Simple variant of RCU whose quiescent states are voluntary context | |
367 | // switch, cond_resched_rcu_qs(), user-space execution, and idle. | |
368 | // As such, grace periods can take one good long time. There are no | |
369 | // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() | |
370 | // because this implementation is intended to get the system into a safe | |
371 | // state for some of the manipulations involved in tracing and the like. | |
372 | // Finally, this implementation does not support high call_rcu_tasks() | |
373 | // rates from multiple CPUs. If this is required, per-CPU callback lists | |
374 | // will be needed. | |
375 | ||
e4fe5dd6 PM |
376 | /* Pre-grace-period preparation. */ |
377 | static void rcu_tasks_pregp_step(void) | |
378 | { | |
379 | /* | |
380 | * Wait for all pre-existing t->on_rq and t->nvcsw transitions | |
381 | * to complete. Invoking synchronize_rcu() suffices because all | |
382 | * these transitions occur with interrupts disabled. Without this | |
383 | * synchronize_rcu(), a read-side critical section that started | |
384 | * before the grace period might be incorrectly seen as having | |
385 | * started after the grace period. | |
386 | * | |
387 | * This synchronize_rcu() also dispenses with the need for a | |
388 | * memory barrier on the first store to t->rcu_tasks_holdout, | |
389 | * as it forces the store to happen after the beginning of the | |
390 | * grace period. | |
391 | */ | |
392 | synchronize_rcu(); | |
393 | } | |
394 | ||
395 | /* Per-task initial processing. */ | |
396 | static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) | |
397 | { | |
398 | if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { | |
399 | get_task_struct(t); | |
400 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); | |
401 | WRITE_ONCE(t->rcu_tasks_holdout, true); | |
402 | list_add(&t->rcu_tasks_holdout_list, hop); | |
403 | } | |
404 | } | |
405 | ||
406 | /* Processing between scanning the task list and draining the holdout list. */ |
407 | static void rcu_tasks_postscan(void) |
408 | { | |
409 | /* | |
410 | * Wait for tasks that are in the process of exiting. This | |
411 | * does only part of the job, ensuring that all tasks that were | |
412 | * previously exiting reach the point where they have disabled | |
413 | * preemption, allowing the later synchronize_rcu() to finish | |
414 | * the job. | |
415 | */ | |
416 | synchronize_srcu(&tasks_rcu_exit_srcu); | |
417 | } | |
418 | ||
5873b8a9 PM |
419 | /* See if tasks are still holding out, complain if so. */ |
420 | static void check_holdout_task(struct task_struct *t, | |
421 | bool needreport, bool *firstreport) | |
422 | { | |
423 | int cpu; | |
424 | ||
425 | if (!READ_ONCE(t->rcu_tasks_holdout) || | |
426 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || | |
427 | !READ_ONCE(t->on_rq) || | |
428 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && | |
429 | !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { | |
430 | WRITE_ONCE(t->rcu_tasks_holdout, false); | |
431 | list_del_init(&t->rcu_tasks_holdout_list); | |
432 | put_task_struct(t); | |
433 | return; | |
434 | } | |
435 | rcu_request_urgent_qs_task(t); | |
436 | if (!needreport) | |
437 | return; | |
438 | if (*firstreport) { | |
439 | pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); | |
440 | *firstreport = false; | |
441 | } | |
442 | cpu = task_cpu(t); | |
443 | pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", | |
444 | t, ".I"[is_idle_task(t)], | |
445 | "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], | |
446 | t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, | |
447 | t->rcu_tasks_idle_cpu, cpu); | |
448 | sched_show_task(t); | |
449 | } | |
450 | ||
e4fe5dd6 PM |
451 | /* Scan the holdout lists for tasks no longer holding out. */ |
452 | static void check_all_holdout_tasks(struct list_head *hop, | |
453 | bool needreport, bool *firstreport) | |
454 | { | |
455 | struct task_struct *t, *t1; | |
456 | ||
457 | list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { | |
458 | check_holdout_task(t, needreport, firstreport); | |
459 | cond_resched(); | |
460 | } | |
461 | } | |
462 | ||
463 | /* Finish off the Tasks-RCU grace period. */ | |
af051ca4 | 464 | static void rcu_tasks_postgp(struct rcu_tasks *rtp) |
e4fe5dd6 PM |
465 | { |
466 | /* | |
467 | * Because ->on_rq and ->nvcsw are not guaranteed to have full |
468 | * memory barriers prior to them in the schedule() path, memory | |
469 | * reordering on other CPUs could cause their RCU-tasks read-side | |
470 | * critical sections to extend past the end of the grace period. | |
471 | * However, because these ->nvcsw updates are carried out with | |
472 | * interrupts disabled, we can use synchronize_rcu() to force the | |
473 | * needed ordering on all such CPUs. | |
474 | * | |
475 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout | |
476 | * accesses to be within the grace period, avoiding the need for | |
477 | * memory barriers for ->rcu_tasks_holdout accesses. | |
478 | * | |
479 | * In addition, this synchronize_rcu() waits for exiting tasks | |
480 | * to complete their final preempt_disable() region of execution, | |
481 | * cleaning up after the synchronize_srcu() above. | |
482 | */ | |
483 | synchronize_rcu(); | |
484 | } | |
485 | ||
5873b8a9 | 486 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
c97d12a6 | 487 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); |
5873b8a9 PM |
488 | |
489 | /** | |
490 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period |
491 | * @rhp: structure to be used for queueing the RCU updates. | |
492 | * @func: actual callback function to be invoked after the grace period | |
493 | * | |
494 | * The callback function will be invoked some time after a full grace | |
495 | * period elapses, in other words after all currently executing RCU | |
496 | * read-side critical sections have completed. call_rcu_tasks() assumes | |
497 | * that the read-side critical sections end at a voluntary context | |
498 | * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle, | |
499 | * or transition to usermode execution. As such, there are no read-side | |
500 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
501 | * this primitive is intended to determine that all tasks have passed | |
502 | * through a safe state, not so much for data-structure synchronization. |
503 | * | |
504 | * See the description of call_rcu() for more detailed information on | |
505 | * memory ordering guarantees. | |
506 | */ | |
507 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) | |
508 | { | |
509 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); | |
510 | } | |
511 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | |
512 | ||
513 | /** | |
514 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. | |
515 | * | |
516 | * Control will return to the caller some time after a full rcu-tasks | |
517 | * grace period has elapsed, in other words after all currently | |
518 | * executing rcu-tasks read-side critical sections have completed. These |
519 | * read-side critical sections are delimited by calls to schedule(), | |
520 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls | |
521 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). | |
522 | * | |
523 | * This is a very specialized primitive, intended only for a few uses in | |
524 | * tracing and other situations requiring manipulation of function | |
525 | * preambles and profiling hooks. The synchronize_rcu_tasks() function | |
526 | * is not (yet) intended for heavy use from multiple CPUs. | |
527 | * | |
528 | * See the description of synchronize_rcu() for more detailed information | |
529 | * on memory ordering guarantees. | |
530 | */ | |
531 | void synchronize_rcu_tasks(void) | |
532 | { | |
533 | synchronize_rcu_tasks_generic(&rcu_tasks); | |
534 | } | |
535 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); | |
536 | ||
537 | /** | |
538 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. | |
539 | * | |
540 | * Although the current implementation is guaranteed to wait, it is not | |
541 | * obligated to, for example, if there are no pending callbacks. | |
542 | */ | |
543 | void rcu_barrier_tasks(void) | |
544 | { | |
545 | /* There is only one callback queue, so this is easy. ;-) */ | |
546 | synchronize_rcu_tasks(); | |
547 | } | |
548 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); | |
549 | ||
550 | static int __init rcu_spawn_tasks_kthread(void) | |
551 | { | |
e4fe5dd6 PM |
552 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
553 | rcu_tasks.pertask_func = rcu_tasks_pertask; | |
554 | rcu_tasks.postscan_func = rcu_tasks_postscan; | |
555 | rcu_tasks.holdouts_func = check_all_holdout_tasks; | |
556 | rcu_tasks.postgp_func = rcu_tasks_postgp; | |
5873b8a9 PM |
557 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
558 | return 0; | |
559 | } | |
560 | core_initcall(rcu_spawn_tasks_kthread); | |
561 | ||
e21408ce PM |
562 | static void show_rcu_tasks_classic_gp_kthread(void) |
563 | { | |
564 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); | |
565 | } | |
566 | ||
567 | #else /* #ifdef CONFIG_TASKS_RCU */ | |
568 | static void show_rcu_tasks_classic_gp_kthread(void) { } | |
569 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ | |
c84aad76 PM |
570 | |
571 | #ifdef CONFIG_TASKS_RUDE_RCU | |
572 | ||
573 | //////////////////////////////////////////////////////////////////////// | |
574 | // | |
575 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of | |
576 | // passing an empty function to schedule_on_each_cpu(). This approach | |
577 | // provides an asynchronous call_rcu_tasks_rude() API and batching | |
578 | // of concurrent calls to the synchronous synchronize_rcu_rude() API. | |
579 | // This sends IPIs far and wide and induces otherwise unnecessary context | |
580 | // switches on all online CPUs, whether idle or not. | |
581 | ||
582 | // Empty function to allow workqueues to force a context switch. | |
583 | static void rcu_tasks_be_rude(struct work_struct *work) | |
584 | { | |
585 | } | |
586 | ||
587 | // Wait for one rude RCU-tasks grace period. | |
588 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) | |
589 | { | |
590 | schedule_on_each_cpu(rcu_tasks_be_rude); | |
591 | } | |
592 | ||
593 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); | |
c97d12a6 PM |
594 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
595 | "RCU Tasks Rude"); | |
c84aad76 PM |
596 | |
597 | /** | |
598 | * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period |
599 | * @rhp: structure to be used for queueing the RCU updates. | |
600 | * @func: actual callback function to be invoked after the grace period | |
601 | * | |
602 | * The callback function will be invoked some time after a full grace | |
603 | * period elapses, in other words after all currently executing RCU | |
604 | * read-side critical sections have completed. call_rcu_tasks_rude() | |
605 | * assumes that the read-side critical sections end at context switch, | |
606 | * cond_resched_rcu_qs(), or transition to usermode execution. As such, | |
607 | * there are no read-side primitives analogous to rcu_read_lock() and | |
608 | * rcu_read_unlock() because this primitive is intended to determine | |
609 | * that all tasks have passed through a safe state, not so much for | |
610 | * data-structure synchronization. |
611 | * | |
612 | * See the description of call_rcu() for more detailed information on | |
613 | * memory ordering guarantees. | |
614 | */ | |
615 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) | |
616 | { | |
617 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); | |
618 | } | |
619 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); | |
620 | ||
621 | /** | |
622 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period | |
623 | * | |
624 | * Control will return to the caller some time after a rude rcu-tasks | |
625 | * grace period has elapsed, in other words after all currently | |
626 | * executing rcu-tasks read-side critical sections have elapsed. These | |
627 | * read-side critical sections are delimited by calls to schedule(), | |
628 | * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory, | |
629 | * anyway) cond_resched(). | |
630 | * | |
631 | * This is a very specialized primitive, intended only for a few uses in | |
632 | * tracing and other situations requiring manipulation of function preambles | |
633 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not | |
634 | * (yet) intended for heavy use from multiple CPUs. | |
635 | * | |
636 | * See the description of synchronize_rcu() for more detailed information | |
637 | * on memory ordering guarantees. | |
638 | */ | |
639 | void synchronize_rcu_tasks_rude(void) | |
640 | { | |
641 | synchronize_rcu_tasks_generic(&rcu_tasks_rude); | |
642 | } | |
643 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); | |
644 | ||
645 | /** | |
646 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. | |
647 | * | |
648 | * Although the current implementation is guaranteed to wait, it is not | |
649 | * obligated to, for example, if there are no pending callbacks. | |
650 | */ | |
651 | void rcu_barrier_tasks_rude(void) | |
652 | { | |
653 | /* There is only one callback queue, so this is easy. ;-) */ | |
654 | synchronize_rcu_tasks_rude(); | |
655 | } | |
656 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); | |
657 | ||
658 | static int __init rcu_spawn_tasks_rude_kthread(void) | |
659 | { | |
660 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); | |
661 | return 0; | |
662 | } | |
663 | core_initcall(rcu_spawn_tasks_rude_kthread); | |
664 | ||
e21408ce PM |
665 | static void show_rcu_tasks_rude_gp_kthread(void) |
666 | { | |
667 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); | |
668 | } | |
669 | ||
670 | #else /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
671 | static void show_rcu_tasks_rude_gp_kthread(void) {} | |
672 | #endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
673 | |
674 | //////////////////////////////////////////////////////////////////////// | |
675 | // | |
676 | // Tracing variant of Tasks RCU. This variant is designed to be used | |
677 | // to protect tracing hooks, including those of BPF. This variant | |
678 | // therefore: | |
679 | // | |
680 | // 1. Has explicit read-side markers to allow finite grace periods | |
681 | // in the face of in-kernel loops for PREEMPT=n builds. | |
682 | // | |
683 | // 2. Protects code in the idle loop, exception entry/exit, and | |
684 | // CPU-hotplug code paths, similar to the capabilities of SRCU. | |
685 | // | |
686 | // 3. Avoids expensive read-side instructions, having overhead similar |
687 | // to that of Preemptible RCU. | |
688 | // | |
689 | // There are of course downsides. The grace-period code can send IPIs to | |
690 | // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace. | |
691 | // It is necessary to scan the full tasklist, much as for Tasks RCU. There | |
692 | // is a single callback queue guarded by a single lock, again, much as for | |
693 | // Tasks RCU. If needed, these downsides can be at least partially remedied. | |
694 | // | |
695 | // Perhaps most important, this variant of RCU does not affect the vanilla | |
696 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace | |
697 | // readers can operate from idle, offline, and exception entry/exit in no | |
698 | // way allows rcu_preempt and rcu_sched readers to also do so. | |
699 | ||
700 | // The lockdep state must be outside of #ifdef to be useful. | |
701 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
702 | static struct lock_class_key rcu_lock_trace_key; | |
703 | struct lockdep_map rcu_trace_lock_map = | |
704 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); | |
705 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); | |
706 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
707 | ||
708 | #ifdef CONFIG_TASKS_TRACE_RCU | |
709 | ||
710 | atomic_t trc_n_readers_need_end; // Number of waited-for readers. | |
711 | DECLARE_WAIT_QUEUE_HEAD(trc_wait); // Wait queue for the grace-period kthread awaiting readers. |
712 | ||
713 | // Record outstanding IPIs to each CPU. No point in sending two... | |
714 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); | |
715 | ||
716 | /* If we are the last reader, wake up the grace-period kthread. */ | |
717 | void rcu_read_unlock_trace_special(struct task_struct *t) | |
718 | { | |
719 | WRITE_ONCE(t->trc_reader_need_end, false); | |
720 | if (atomic_dec_and_test(&trc_n_readers_need_end)) | |
721 | wake_up(&trc_wait); | |
722 | } | |
723 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); | |
724 | ||
725 | /* Add a task to the holdout list, if it is not already on the list. */ | |
726 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) | |
727 | { | |
728 | if (list_empty(&t->trc_holdout_list)) { | |
729 | get_task_struct(t); | |
730 | list_add(&t->trc_holdout_list, bhp); | |
731 | } | |
732 | } | |
733 | ||
734 | /* Remove a task from the holdout list, if it is in fact present. */ | |
735 | static void trc_del_holdout(struct task_struct *t) | |
736 | { | |
737 | if (!list_empty(&t->trc_holdout_list)) { | |
738 | list_del_init(&t->trc_holdout_list); | |
739 | put_task_struct(t); | |
740 | } | |
741 | } | |
742 | ||
743 | /* IPI handler to check task state. */ | |
744 | static void trc_read_check_handler(void *t_in) | |
745 | { | |
746 | struct task_struct *t = current; | |
747 | struct task_struct *texp = t_in; | |
748 | ||
749 | // If the task is no longer running on this CPU, leave. | |
750 | if (unlikely(texp != t)) { | |
751 | if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) | |
752 | wake_up(&trc_wait); | |
753 | goto reset_ipi; // Already on holdout list, so will check later. | |
754 | } | |
755 | ||
756 | // If the task is not in a read-side critical section, and | |
757 | // if this is the last reader, awaken the grace-period kthread. | |
758 | if (likely(!t->trc_reader_nesting)) { | |
759 | if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) | |
760 | wake_up(&trc_wait); | |
761 | // Mark as checked after decrement to avoid false | |
762 | // positives on the above WARN_ON_ONCE(). | |
763 | WRITE_ONCE(t->trc_reader_checked, true); | |
764 | goto reset_ipi; | |
765 | } | |
766 | WRITE_ONCE(t->trc_reader_checked, true); | |
767 | ||
768 | // Get here if the task is in a read-side critical section. Set | |
769 | // its state so that it will awaken the grace-period kthread upon | |
770 | // exit from that critical section. | |
771 | WARN_ON_ONCE(t->trc_reader_need_end); | |
772 | WRITE_ONCE(t->trc_reader_need_end, true); | |
773 | ||
774 | reset_ipi: | |
775 | // Allow future IPIs to be sent to this CPU and for this task. |
776 | // Also order this IPI handler against any later manipulations of | |
777 | // the intended task. | |
778 | smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ | |
779 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ | |
780 | } | |
781 | ||
782 | /* Callback function for scheduler to check locked-down task. */ | |
783 | static bool trc_inspect_reader(struct task_struct *t, void *arg) | |
784 | { | |
785 | if (task_curr(t)) | |
786 | return false; // It is running, so decline to inspect it. | |
787 | ||
788 | // Mark as checked. Because this is called from the grace-period | |
789 | // kthread, also remove the task from the holdout list. | |
790 | t->trc_reader_checked = true; | |
791 | trc_del_holdout(t); | |
792 | ||
793 | // If the task is in a read-side critical section, set up |
794 | // its state so that it will awaken the grace-period kthread upon | |
795 | // exit from that critical section. | |
796 | if (unlikely(t->trc_reader_nesting)) { | |
797 | atomic_inc(&trc_n_readers_need_end); // One more to wait on. | |
798 | WARN_ON_ONCE(t->trc_reader_need_end); | |
799 | WRITE_ONCE(t->trc_reader_need_end, true); | |
800 | } | |
801 | return true; | |
802 | } | |
803 | ||
804 | /* Attempt to extract the state for the specified task. */ | |
805 | static void trc_wait_for_one_reader(struct task_struct *t, | |
806 | struct list_head *bhp) | |
807 | { | |
808 | int cpu; | |
809 | ||
810 | // If a previous IPI is still in flight, let it complete. | |
811 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI | |
812 | return; | |
813 | ||
814 | // The current task had better be in a quiescent state. | |
815 | if (t == current) { | |
816 | t->trc_reader_checked = true; | |
817 | trc_del_holdout(t); | |
818 | WARN_ON_ONCE(t->trc_reader_nesting); | |
819 | return; | |
820 | } | |
821 | ||
822 | // Attempt to nail down the task for inspection. | |
823 | get_task_struct(t); | |
824 | if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) { | |
825 | put_task_struct(t); | |
826 | return; | |
827 | } | |
828 | put_task_struct(t); | |
829 | ||
830 | // If currently running, send an IPI; either way, add it to the list. |
831 | trc_add_holdout(t, bhp); | |
832 | if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { | |
833 | // The task is currently running, so try IPIing it. | |
834 | cpu = task_cpu(t); | |
835 | ||
836 | // If there is already an IPI outstanding, let it happen. | |
837 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) | |
838 | return; | |
839 | ||
840 | atomic_inc(&trc_n_readers_need_end); | |
841 | per_cpu(trc_ipi_to_cpu, cpu) = true; | |
842 | t->trc_ipi_to_cpu = cpu; | |
843 | if (smp_call_function_single(cpu, | |
844 | trc_read_check_handler, t, 0)) { | |
845 | // Just in case there is some other reason for | |
846 | // failure than the target CPU being offline. | |
847 | per_cpu(trc_ipi_to_cpu, cpu) = false; | |
848 | t->trc_ipi_to_cpu = -1; |
849 | if (atomic_dec_and_test(&trc_n_readers_need_end)) { | |
850 | WARN_ON_ONCE(1); | |
851 | wake_up(&trc_wait); | |
852 | } | |
853 | } | |
854 | } | |
855 | } | |
856 | ||
857 | /* Initialize for a new RCU-tasks-trace grace period. */ | |
858 | static void rcu_tasks_trace_pregp_step(void) | |
859 | { | |
860 | int cpu; | |
861 | ||
862 | // Wait for CPU-hotplug paths to complete. | |
863 | cpus_read_lock(); | |
864 | cpus_read_unlock(); | |
865 | ||
866 | // Allow for fast-acting IPIs. | |
867 | atomic_set(&trc_n_readers_need_end, 1); | |
868 | ||
869 | // There shouldn't be any old IPIs, but... | |
870 | for_each_possible_cpu(cpu) | |
871 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); | |
872 | } | |
873 | ||
874 | /* Do first-round processing for the specified task. */ | |
875 | static void rcu_tasks_trace_pertask(struct task_struct *t, | |
876 | struct list_head *hop) | |
877 | { | |
878 | WRITE_ONCE(t->trc_reader_need_end, false); | |
43766c3e | 879 | WRITE_ONCE(t->trc_reader_checked, false); |
d5f177d3 PM |
880 | t->trc_ipi_to_cpu = -1; |
881 | trc_wait_for_one_reader(t, hop); | |
882 | } | |
883 | ||
884 | /* Do intermediate processing between task and holdout scans. */ | |
885 | static void rcu_tasks_trace_postscan(void) | |
886 | { | |
887 | // Wait for late-stage exiting tasks to finish exiting. | |
888 | // These might have passed the call to exit_tasks_rcu_finish(). | |
889 | synchronize_rcu(); | |
890 | // Any tasks that exit after this point will set ->trc_reader_checked. | |
891 | } | |
892 | ||
4593e772 PM |
893 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
894 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) | |
895 | { | |
896 | int cpu; | |
897 | ||
898 | if (*firstreport) { | |
899 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); | |
900 | *firstreport = false; | |
901 | } | |
902 | // FIXME: This should attempt to use try_invoke_on_nonrunning_task(). | |
903 | cpu = task_cpu(t); | |
904 | pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", | |
905 | t->pid, | |
906 | ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0], | |
907 | ".i"[is_idle_task(t)], | |
908 | ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)], | |
909 | t->trc_reader_nesting, | |
910 | " N"[!!t->trc_reader_need_end], | |
911 | cpu); | |
912 | sched_show_task(t); | |
913 | } | |
914 | ||
915 | /* List stalled IPIs for RCU tasks trace. */ | |
916 | static void show_stalled_ipi_trace(void) | |
917 | { | |
918 | int cpu; | |
919 | ||
920 | for_each_possible_cpu(cpu) | |
921 | if (per_cpu(trc_ipi_to_cpu, cpu)) | |
922 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); | |
923 | } | |
924 | ||
d5f177d3 PM |
925 | /* Do one scan of the holdout list. */ |
926 | static void check_all_holdout_tasks_trace(struct list_head *hop, | |
4593e772 | 927 | bool needreport, bool *firstreport) |
d5f177d3 PM |
928 | { |
929 | struct task_struct *g, *t; | |
930 | ||
931 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { | |
932 | // If safe and needed, try to check the current task. | |
933 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && | |
934 | !READ_ONCE(t->trc_reader_checked)) | |
935 | trc_wait_for_one_reader(t, hop); | |
936 | ||
937 | // If check succeeded, remove this task from the list. | |
938 | if (READ_ONCE(t->trc_reader_checked)) | |
939 | trc_del_holdout(t); | |
4593e772 PM |
940 | else if (needreport) |
941 | show_stalled_task_trace(t, firstreport); | |
942 | } | |
943 | if (needreport) { | |
944 | if (*firstreport) |
945 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); | |
946 | show_stalled_ipi_trace(); | |
d5f177d3 PM |
947 | } |
948 | } | |
949 | ||
950 | /* Wait for grace period to complete and provide ordering. */ | |
af051ca4 | 951 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
d5f177d3 | 952 | { |
4593e772 PM |
953 | bool firstreport; |
954 | struct task_struct *g, *t; | |
955 | LIST_HEAD(holdouts); | |
956 | long ret; | |
957 | ||
d5f177d3 PM |
958 | // Remove the safety count. |
959 | smp_mb__before_atomic(); // Order vs. earlier atomics | |
960 | atomic_dec(&trc_n_readers_need_end); | |
961 | smp_mb__after_atomic(); // Order vs. later atomics | |
962 | ||
963 | // Wait for readers. | |
af051ca4 | 964 | set_tasks_gp_state(rtp, RTGS_WAIT_READERS); |
4593e772 PM |
965 | for (;;) { |
966 | ret = wait_event_idle_exclusive_timeout( | |
967 | trc_wait, | |
968 | atomic_read(&trc_n_readers_need_end) == 0, | |
969 | READ_ONCE(rcu_task_stall_timeout)); | |
970 | if (ret) | |
971 | break; // Count reached zero. | |
af051ca4 | 972 | // Stall warning time, so make a list of the offenders. |
4593e772 PM |
973 | for_each_process_thread(g, t) |
974 | if (READ_ONCE(t->trc_reader_need_end)) | |
975 | trc_add_holdout(t, &holdouts); | |
976 | firstreport = true; | |
977 | list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) | |
978 | if (READ_ONCE(t->trc_reader_need_end)) { | |
979 | show_stalled_task_trace(t, &firstreport); | |
980 | trc_del_holdout(t); | |
981 | } | |
982 | if (firstreport) | |
983 | pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n"); | |
984 | show_stalled_ipi_trace(); | |
985 | pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end)); | |
986 | } | |
d5f177d3 | 987 | smp_mb(); // Caller's code must be ordered after wakeup. |
43766c3e | 988 | // Pairs with pretty much every ordering primitive. |
d5f177d3 PM |
989 | } |
990 | ||
991 | /* Report any needed quiescent state for this exiting task. */ | |
992 | void exit_tasks_rcu_finish_trace(struct task_struct *t) | |
993 | { | |
994 | WRITE_ONCE(t->trc_reader_checked, true); | |
995 | WARN_ON_ONCE(t->trc_reader_nesting); | |
996 | WRITE_ONCE(t->trc_reader_nesting, 0); | |
997 | if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end))) | |
998 | rcu_read_unlock_trace_special(t); | |
999 | } | |
1000 | ||
1001 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); | |
1002 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, | |
1003 | "RCU Tasks Trace"); | |
1004 | ||
1005 | /** | |
1006 | * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period |
1007 | * @rhp: structure to be used for queueing the RCU updates. | |
1008 | * @func: actual callback function to be invoked after the grace period | |
1009 | * | |
1010 | * The callback function will be invoked some time after a full grace | |
1011 | * period elapses, in other words after all currently executing RCU | |
1012 | * read-side critical sections have completed. call_rcu_tasks_trace() | |
1013 | * assumes that the read-side critical sections end at context switch, | |
1014 | * cond_resched_rcu_qs(), or transition to usermode execution. As such, | |
1015 | * there are no read-side primitives analogous to rcu_read_lock() and | |
1016 | * rcu_read_unlock() because this primitive is intended to determine | |
1017 | * that all tasks have passed through a safe state, not so much for | |
1018 | * data-structure synchronization. |
1019 | * | |
1020 | * See the description of call_rcu() for more detailed information on | |
1021 | * memory ordering guarantees. | |
1022 | */ | |
1023 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) | |
1024 | { | |
1025 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); | |
1026 | } | |
1027 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); | |
1028 | ||
1029 | /** | |
1030 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period | |
1031 | * | |
1032 | * Control will return to the caller some time after a trace rcu-tasks | |
1033 | * grace period has elapsed, in other words after all currently | |
1034 | * executing rcu-tasks read-side critical sections have completed. These |
1035 | * read-side critical sections are delimited by calls to schedule(), | |
1036 | * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory, | |
1037 | * anyway) cond_resched(). | |
1038 | * | |
1039 | * This is a very specialized primitive, intended only for a few uses in | |
1040 | * tracing and other situations requiring manipulation of function preambles | |
1041 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not | |
1042 | * (yet) intended for heavy use from multiple CPUs. | |
1043 | * | |
1044 | * See the description of synchronize_rcu() for more detailed information | |
1045 | * on memory ordering guarantees. | |
1046 | */ | |
1047 | void synchronize_rcu_tasks_trace(void) | |
1048 | { | |
1049 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); | |
1050 | synchronize_rcu_tasks_generic(&rcu_tasks_trace); | |
1051 | } | |
1052 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); | |
1053 | ||
1054 | /** | |
1055 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. | |
1056 | * | |
1057 | * Although the current implementation is guaranteed to wait, it is not | |
1058 | * obligated to, for example, if there are no pending callbacks. | |
1059 | */ | |
1060 | void rcu_barrier_tasks_trace(void) | |
1061 | { | |
1062 | /* There is only one callback queue, so this is easy. ;-) */ | |
1063 | synchronize_rcu_tasks_trace(); | |
1064 | } | |
1065 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); | |
1066 | ||
1067 | static int __init rcu_spawn_tasks_trace_kthread(void) | |
1068 | { | |
1069 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; | |
1070 | rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask; | |
1071 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; | |
1072 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; | |
1073 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; | |
1074 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); | |
1075 | return 0; | |
1076 | } | |
1077 | core_initcall(rcu_spawn_tasks_trace_kthread); | |
1078 | ||
e21408ce PM |
1079 | static void show_rcu_tasks_trace_gp_kthread(void) |
1080 | { | |
1081 | char buf[32]; | |
1082 | ||
1083 | sprintf(buf, "N%d", atomic_read(&trc_n_readers_need_end)); | |
1084 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); | |
1085 | } | |
1086 | ||
d5f177d3 PM |
1087 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
1088 | void exit_tasks_rcu_finish_trace(struct task_struct *t) { } | |
e21408ce | 1089 | static inline void show_rcu_tasks_trace_gp_kthread(void) {} |
d5f177d3 | 1090 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
8fd8ca38 | 1091 | |
e21408ce PM |
1092 | void show_rcu_tasks_gp_kthreads(void) |
1093 | { | |
1094 | show_rcu_tasks_classic_gp_kthread(); | |
1095 | show_rcu_tasks_rude_gp_kthread(); | |
1096 | show_rcu_tasks_trace_gp_kthread(); | |
1097 | } | |
1098 | ||
8fd8ca38 PM |
1099 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
1100 | static inline void rcu_tasks_bootup_oddness(void) {} | |
e21408ce | 1101 | void show_rcu_tasks_gp_kthreads(void) {} |
8fd8ca38 | 1102 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |