eacd6f04 PM |
1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* | |
3 | * Task-based RCU implementations. | |
4 | * | |
5 | * Copyright (C) 2020 Paul E. McKenney | |
6 | */ | |
7 | ||
8fd8ca38 | 8 | #ifdef CONFIG_TASKS_RCU_GENERIC |
9b073de1 | 9 | #include "rcu_segcblist.h" |
5873b8a9 PM |
10 | |
11 | //////////////////////////////////////////////////////////////////////// | |
12 | // | |
13 | // Generic data structures. | |
14 | ||
15 | struct rcu_tasks; | |
16 | typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); | |
7460ade1 | 17 | typedef void (*pregp_func_t)(struct list_head *hop); |
e4fe5dd6 | 18 | typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); |
9796e1ae | 19 | typedef void (*postscan_func_t)(struct list_head *hop); |
e4fe5dd6 | 20 | typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); |
af051ca4 | 21 | typedef void (*postgp_func_t)(struct rcu_tasks *rtp); |
eacd6f04 | 22 | |
07e10515 | 23 | /** |
cafafd67 | 24 | * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. |
9b073de1 | 25 | * @cblist: Callback list. |
381a4f3b | 26 | * @lock: Lock protecting per-CPU callback list. |
7d13d30b PM |
27 | * @rtp_jiffies: Jiffies counter value for statistics. |
28 | * @rtp_n_lock_retries: Rough lock-contention statistic. | |
d363f833 | 29 | * @rtp_work: Work queue for invoking callbacks. |
3063b33a | 30 | * @rtp_irq_work: IRQ work queue for deferred wakeups. |
ce9b1c66 | 31 | * @barrier_q_head: RCU callback for barrier operation. |
434c9eef | 32 | * @rtp_blkd_tasks: List of tasks blocked as readers. |
ce9b1c66 PM |
33 | * @cpu: CPU number corresponding to this entry. |
34 | * @rtpp: Pointer to the rcu_tasks structure. | |
cafafd67 PM |
35 | */ |
36 | struct rcu_tasks_percpu { | |
9b073de1 | 37 | struct rcu_segcblist cblist; |
381a4f3b | 38 | raw_spinlock_t __private lock; |
7d13d30b PM |
39 | unsigned long rtp_jiffies; |
40 | unsigned long rtp_n_lock_retries; | |
d363f833 | 41 | struct work_struct rtp_work; |
3063b33a | 42 | struct irq_work rtp_irq_work; |
ce9b1c66 | 43 | struct rcu_head barrier_q_head; |
434c9eef | 44 | struct list_head rtp_blkd_tasks; |
d363f833 PM |
45 | int cpu; |
46 | struct rcu_tasks *rtpp; | |
cafafd67 PM |
47 | }; |
48 | ||
49 | /** | |
50 | * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. | |
88db792b | 51 | * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. |
cafafd67 | 52 | * @cbs_gbl_lock: Lock protecting callback list. |
d96225fd | 53 | * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. |
07e10515 | 54 | * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. |
5873b8a9 | 55 | * @gp_func: This flavor's grace-period-wait function. |
af051ca4 | 56 | * @gp_state: Grace period's most recent state transition (debugging). |
4fe192df | 57 | * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. |
2393a613 | 58 | * @init_fract: Initial backoff sleep interval. |
af051ca4 PM |
59 | * @gp_jiffies: Time of last @gp_state transition. |
60 | * @gp_start: Most recent grace-period start in jiffies. | |
b14fb4fb | 61 | * @tasks_gp_seq: Number of grace periods completed since boot. |
238dbce3 | 62 | * @n_ipis: Number of IPIs sent to encourage grace periods to end. |
7e0669c3 | 63 | * @n_ipis_fails: Number of IPI-send failures. |
e4fe5dd6 PM |
64 | * @pregp_func: This flavor's pre-grace-period function (optional). |
65 | * @pertask_func: This flavor's per-task scan function (optional). | |
66 | * @postscan_func: This flavor's post-task scan function (optional). | |
85b86994 | 67 | * @holdouts_func: This flavor's holdout-list scan function (optional). |
e4fe5dd6 | 68 | * @postgp_func: This flavor's post-grace-period function (optional). |
5873b8a9 | 69 | * @call_func: This flavor's call_rcu()-equivalent function. |
cafafd67 | 70 | * @rtpcpu: This flavor's rcu_tasks_percpu structure. |
7a30871b | 71 | * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. |
2cee0789 PM |
72 | * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. |
73 | * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. | |
fd796e41 | 74 | * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. |
ce9b1c66 PM |
75 | * @barrier_q_mutex: Serialize barrier operations. |
76 | * @barrier_q_count: Number of queues being waited on. | |
77 | * @barrier_q_completion: Barrier wait/wakeup mechanism. | |
78 | * @barrier_q_seq: Sequence number for barrier operations. | |
c97d12a6 PM |
79 | * @name: This flavor's textual name. |
80 | * @kname: This flavor's kthread name. | |
07e10515 PM |
81 | */ |
82 | struct rcu_tasks { | |
88db792b | 83 | struct rcuwait cbs_wait; |
cafafd67 | 84 | raw_spinlock_t cbs_gbl_lock; |
d96225fd | 85 | struct mutex tasks_gp_mutex; |
af051ca4 | 86 | int gp_state; |
4fe192df | 87 | int gp_sleep; |
2393a613 | 88 | int init_fract; |
af051ca4 | 89 | unsigned long gp_jiffies; |
88092d0c | 90 | unsigned long gp_start; |
b14fb4fb | 91 | unsigned long tasks_gp_seq; |
238dbce3 | 92 | unsigned long n_ipis; |
7e0669c3 | 93 | unsigned long n_ipis_fails; |
07e10515 | 94 | struct task_struct *kthread_ptr; |
5873b8a9 | 95 | rcu_tasks_gp_func_t gp_func; |
e4fe5dd6 PM |
96 | pregp_func_t pregp_func; |
97 | pertask_func_t pertask_func; | |
98 | postscan_func_t postscan_func; | |
99 | holdouts_func_t holdouts_func; | |
100 | postgp_func_t postgp_func; | |
5873b8a9 | 101 | call_rcu_func_t call_func; |
cafafd67 | 102 | struct rcu_tasks_percpu __percpu *rtpcpu; |
7a30871b | 103 | int percpu_enqueue_shift; |
8dd593fd | 104 | int percpu_enqueue_lim; |
2cee0789 | 105 | int percpu_dequeue_lim; |
fd796e41 | 106 | unsigned long percpu_dequeue_gpseq; |
ce9b1c66 PM |
107 | struct mutex barrier_q_mutex; |
108 | atomic_t barrier_q_count; | |
109 | struct completion barrier_q_completion; | |
110 | unsigned long barrier_q_seq; | |
c97d12a6 PM |
111 | char *name; |
112 | char *kname; | |
07e10515 PM |
113 | }; |
114 | ||
3063b33a PM |
115 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); |
116 | ||
cafafd67 PM |
117 | #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ |
118 | static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ | |
381a4f3b | 119 | .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ |
88db792b | 120 | .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ |
cafafd67 PM |
121 | }; \ |
122 | static struct rcu_tasks rt_name = \ | |
123 | { \ | |
88db792b | 124 | .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ |
cafafd67 | 125 | .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ |
d96225fd | 126 | .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ |
cafafd67 PM |
127 | .gp_func = gp, \ |
128 | .call_func = call, \ | |
129 | .rtpcpu = &rt_name ## __percpu, \ | |
130 | .name = n, \ | |
2bcd18e0 | 131 | .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ |
8dd593fd | 132 | .percpu_enqueue_lim = 1, \ |
2cee0789 | 133 | .percpu_dequeue_lim = 1, \ |
ce9b1c66 PM |
134 | .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ |
135 | .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ | |
cafafd67 | 136 | .kname = #rt_name, \ |
07e10515 PM |
137 | } |
138 | ||
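// Illustrative note: the macro above is instantiated once per flavor later
// in this file, for example:
//
//	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
//
// which statically defines both the per-CPU rcu_tasks_percpu instances and
// the flavor-wide rcu_tasks structure, wiring ->gp_func and ->call_func to
// that flavor's grace-period-wait and callback-enqueue functions.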
eacd6f04 PM |
139 | /* Track exiting tasks in order to allow them to be waited for. */ |
140 | DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); | |
141 | ||
b0afa0f0 | 142 | /* Avoid IPIing CPUs early in the grace period. */ |
574de876 | 143 | #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) |
b0afa0f0 PM |
144 | static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; |
145 | module_param(rcu_task_ipi_delay, int, 0644); | |
146 | ||
eacd6f04 | 147 | /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ |
1cf1144e | 148 | #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) |
eacd6f04 PM |
149 | #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) |
150 | static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; | |
151 | module_param(rcu_task_stall_timeout, int, 0644); | |
f2539003 PM |
152 | #define RCU_TASK_STALL_INFO (HZ * 10) |
153 | static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO; | |
154 | module_param(rcu_task_stall_info, int, 0644); | |
155 | static int rcu_task_stall_info_mult __read_mostly = 3; | |
156 | module_param(rcu_task_stall_info_mult, int, 0444); | |
eacd6f04 | 157 | |
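// Usage sketch (assuming the conventional "rcupdate." module-parameter
// prefix, since this file is included from kernel/rcu/update.c): the stall
// knobs above can be set on the kernel command line, for example:
//
//	rcupdate.rcu_task_stall_info=300 rcupdate.rcu_task_stall_timeout=3000
//
// which would print informational messages once a grace period is 300
// jiffies old and full stall warnings once it is 3000 jiffies old.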
8610b656 PM |
158 | static int rcu_task_enqueue_lim __read_mostly = -1; |
159 | module_param(rcu_task_enqueue_lim, int, 0444); | |
160 | ||
ab97152f PM |
161 | static bool rcu_task_cb_adjust; |
162 | static int rcu_task_contend_lim __read_mostly = 100; | |
163 | module_param(rcu_task_contend_lim, int, 0444); | |
fd796e41 PM |
164 | static int rcu_task_collapse_lim __read_mostly = 10; |
165 | module_param(rcu_task_collapse_lim, int, 0444); | |
ab97152f | 166 | |
af051ca4 PM |
167 | /* RCU tasks grace-period state for debugging. */ |
168 | #define RTGS_INIT 0 | |
169 | #define RTGS_WAIT_WAIT_CBS 1 | |
170 | #define RTGS_WAIT_GP 2 | |
171 | #define RTGS_PRE_WAIT_GP 3 | |
172 | #define RTGS_SCAN_TASKLIST 4 | |
173 | #define RTGS_POST_SCAN_TASKLIST 5 | |
174 | #define RTGS_WAIT_SCAN_HOLDOUTS 6 | |
175 | #define RTGS_SCAN_HOLDOUTS 7 | |
176 | #define RTGS_POST_GP 8 | |
177 | #define RTGS_WAIT_READERS 9 | |
178 | #define RTGS_INVOKE_CBS 10 | |
179 | #define RTGS_WAIT_CBS 11 | |
8344496e | 180 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
181 | static const char * const rcu_tasks_gp_state_names[] = { |
182 | "RTGS_INIT", | |
183 | "RTGS_WAIT_WAIT_CBS", | |
184 | "RTGS_WAIT_GP", | |
185 | "RTGS_PRE_WAIT_GP", | |
186 | "RTGS_SCAN_TASKLIST", | |
187 | "RTGS_POST_SCAN_TASKLIST", | |
188 | "RTGS_WAIT_SCAN_HOLDOUTS", | |
189 | "RTGS_SCAN_HOLDOUTS", | |
190 | "RTGS_POST_GP", | |
191 | "RTGS_WAIT_READERS", | |
192 | "RTGS_INVOKE_CBS", | |
193 | "RTGS_WAIT_CBS", | |
194 | }; | |
8344496e | 195 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 196 | |
5873b8a9 PM |
197 | //////////////////////////////////////////////////////////////////////// |
198 | // | |
199 | // Generic code. | |
200 | ||
d363f833 PM |
201 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); |
202 | ||
af051ca4 PM |
203 | /* Record grace-period phase and time. */ |
204 | static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) | |
205 | { | |
206 | rtp->gp_state = newstate; | |
207 | rtp->gp_jiffies = jiffies; | |
208 | } | |
209 | ||
8344496e | 210 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
211 | /* Return state name. */ |
212 | static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) | |
213 | { | |
214 | int i = data_race(rtp->gp_state); // Let KCSAN detect update races | |
215 | int j = READ_ONCE(i); // Prevent the compiler from reading twice | |
216 | ||
217 | if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) | |
218 | return "???"; | |
219 | return rcu_tasks_gp_state_names[j]; | |
220 | } | |
8344496e | 221 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 222 | |
cafafd67 PM |
223 | // Initialize per-CPU callback lists for the specified flavor of |
224 | // Tasks RCU. | |
225 | static void cblist_init_generic(struct rcu_tasks *rtp) | |
226 | { | |
227 | int cpu; | |
228 | unsigned long flags; | |
8610b656 | 229 | int lim; |
da123016 | 230 | int shift; |
cafafd67 PM |
231 | |
232 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
ab97152f PM |
233 | if (rcu_task_enqueue_lim < 0) { |
234 | rcu_task_enqueue_lim = 1; | |
235 | rcu_task_cb_adjust = true; | |
236 | pr_info("%s: Setting adjustable number of callback queues.\n", __func__); | |
237 | } else if (rcu_task_enqueue_lim == 0) { | |
8610b656 | 238 | rcu_task_enqueue_lim = 1; |
ab97152f | 239 | } |
8610b656 PM |
240 | lim = rcu_task_enqueue_lim; |
241 | ||
242 | if (lim > nr_cpu_ids) | |
243 | lim = nr_cpu_ids; | |
da123016 PM |
244 | shift = ilog2(nr_cpu_ids / lim); |
245 | if (((nr_cpu_ids - 1) >> shift) >= lim) | |
246 | shift++; | |
247 | WRITE_ONCE(rtp->percpu_enqueue_shift, shift); | |
2cee0789 | 248 | WRITE_ONCE(rtp->percpu_dequeue_lim, lim); |
8610b656 | 249 | smp_store_release(&rtp->percpu_enqueue_lim, lim); |
cafafd67 PM |
250 | for_each_possible_cpu(cpu) { |
251 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
252 | ||
253 | WARN_ON_ONCE(!rtpcp); | |
254 | if (cpu) | |
381a4f3b PM |
255 | raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); |
256 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. | |
9b073de1 PM |
257 | if (rcu_segcblist_empty(&rtpcp->cblist)) |
258 | rcu_segcblist_init(&rtpcp->cblist); | |
d363f833 PM |
259 | INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); |
260 | rtpcp->cpu = cpu; | |
261 | rtpcp->rtpp = rtp; | |
434c9eef PM |
262 | if (!rtpcp->rtp_blkd_tasks.next) |
263 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); | |
381a4f3b | 264 | raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. |
cafafd67 PM |
265 | } |
266 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
8610b656 | 267 | pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim)); |
cafafd67 PM |
268 | } |
269 | ||
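// Worked example of the shift computation above (illustrative values):
// with nr_cpu_ids = 6 and a limit of 4, ilog2(6 / 4) is 0, but (5 >> 0) >= 4,
// so the shift is bumped to 1 and CPUs 0-5 enqueue onto queues 0, 0, 1, 1,
// 2, 2, all below the limit. With nr_cpu_ids = 8 and a limit of 2, ilog2(4)
// = 2 already satisfies (7 >> 2) < 2, so CPUs 0-7 map onto queues 0 and 1.
// The enqueue path then selects a queue with:
//
//	cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);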
3063b33a PM |
270 | // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). |
271 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) | |
272 | { | |
273 | struct rcu_tasks *rtp; | |
274 | struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); | |
275 | ||
276 | rtp = rtpcp->rtpp; | |
88db792b | 277 | rcuwait_wake_up(&rtp->cbs_wait); |
3063b33a PM |
278 | } |
279 | ||
5873b8a9 PM |
280 | // Enqueue a callback for the specified flavor of Tasks RCU. |
281 | static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, | |
282 | struct rcu_tasks *rtp) | |
eacd6f04 | 283 | { |
07d95c34 | 284 | int chosen_cpu; |
eacd6f04 | 285 | unsigned long flags; |
07d95c34 | 286 | int ideal_cpu; |
7d13d30b | 287 | unsigned long j; |
ab97152f | 288 | bool needadjust = false; |
eacd6f04 | 289 | bool needwake; |
cafafd67 | 290 | struct rcu_tasks_percpu *rtpcp; |
eacd6f04 PM |
291 | |
292 | rhp->next = NULL; | |
293 | rhp->func = func; | |
cafafd67 | 294 | local_irq_save(flags); |
fd796e41 | 295 | rcu_read_lock(); |
07d95c34 ED |
296 | ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); |
297 | chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); | |
298 | rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); | |
7d13d30b PM |
299 | if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. |
300 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. | |
301 | j = jiffies; | |
302 | if (rtpcp->rtp_jiffies != j) { | |
303 | rtpcp->rtp_jiffies = j; | |
304 | rtpcp->rtp_n_lock_retries = 0; | |
305 | } | |
ab97152f PM |
306 | if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && |
307 | READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) | |
308 | needadjust = true; // Defer adjustment to avoid deadlock. | |
7d13d30b | 309 | } |
9b073de1 | 310 | if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { |
381a4f3b | 311 | raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. |
cafafd67 | 312 | cblist_init_generic(rtp); |
381a4f3b | 313 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. |
cafafd67 | 314 | } |
9b073de1 PM |
315 | needwake = rcu_segcblist_empty(&rtpcp->cblist); |
316 | rcu_segcblist_enqueue(&rtpcp->cblist, rhp); | |
381a4f3b | 317 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
ab97152f PM |
318 | if (unlikely(needadjust)) { |
319 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
320 | if (rtp->percpu_enqueue_lim != nr_cpu_ids) { | |
00a8b4b5 | 321 | WRITE_ONCE(rtp->percpu_enqueue_shift, 0); |
fd796e41 | 322 | WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); |
ab97152f PM |
323 | smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); |
324 | pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); | |
325 | } | |
326 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
327 | } | |
fd796e41 | 328 | rcu_read_unlock(); |
eacd6f04 | 329 | /* Avoid needless wakeups before the grace-period kthread has been spawned. */ |
07e10515 | 330 | if (needwake && READ_ONCE(rtp->kthread_ptr)) |
3063b33a | 331 | irq_work_queue(&rtpcp->rtp_irq_work); |
eacd6f04 | 332 | } |
eacd6f04 | 333 | |
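// Note on the contention heuristic above: when rcu_task_cb_adjust is set,
// more than rcu_task_contend_lim (default 100) failed trylock attempts on a
// single per-CPU queue within one jiffy cause a switch to fully per-CPU
// queuing (enqueue shift 0, limit nr_cpu_ids). The switch itself is deferred
// until the per-CPU lock has been dropped in order to avoid deadlock on
// ->cbs_gbl_lock.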
ce9b1c66 PM |
334 | // RCU callback function for rcu_barrier_tasks_generic(). |
335 | static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) | |
336 | { | |
337 | struct rcu_tasks *rtp; | |
338 | struct rcu_tasks_percpu *rtpcp; | |
339 | ||
340 | rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); | |
341 | rtp = rtpcp->rtpp; | |
342 | if (atomic_dec_and_test(&rtp->barrier_q_count)) | |
343 | complete(&rtp->barrier_q_completion); | |
344 | } | |
345 | ||
346 | // Wait for all in-flight callbacks for the specified RCU Tasks flavor. | |
347 | // Operates in a manner similar to rcu_barrier(). | |
348 | static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) | |
349 | { | |
350 | int cpu; | |
351 | unsigned long flags; | |
352 | struct rcu_tasks_percpu *rtpcp; | |
353 | unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); | |
354 | ||
355 | mutex_lock(&rtp->barrier_q_mutex); | |
356 | if (rcu_seq_done(&rtp->barrier_q_seq, s)) { | |
357 | smp_mb(); | |
358 | mutex_unlock(&rtp->barrier_q_mutex); | |
359 | return; | |
360 | } | |
361 | rcu_seq_start(&rtp->barrier_q_seq); | |
362 | init_completion(&rtp->barrier_q_completion); | |
363 | atomic_set(&rtp->barrier_q_count, 2); | |
364 | for_each_possible_cpu(cpu) { | |
2cee0789 | 365 | if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) |
ce9b1c66 PM |
366 | break; |
367 | rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
368 | rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; | |
369 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
370 | if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) | |
371 | atomic_inc(&rtp->barrier_q_count); | |
372 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
373 | } | |
374 | if (atomic_sub_and_test(2, &rtp->barrier_q_count)) | |
375 | complete(&rtp->barrier_q_completion); | |
376 | wait_for_completion(&rtp->barrier_q_completion); | |
377 | rcu_seq_end(&rtp->barrier_q_seq); | |
378 | mutex_unlock(&rtp->barrier_q_mutex); | |
379 | } | |
380 | ||
4d1114c0 PM |
381 | // Advance callbacks and indicate whether either a grace period or |
382 | // callback invocation is needed. | |
383 | static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) | |
384 | { | |
385 | int cpu; | |
386 | unsigned long flags; | |
fd796e41 PM |
387 | long n; |
388 | long ncbs = 0; | |
389 | long ncbsnz = 0; | |
4d1114c0 PM |
390 | int needgpcb = 0; |
391 | ||
2cee0789 | 392 | for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) { |
4d1114c0 PM |
393 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
394 | ||
395 | /* Advance and accelerate any new callbacks. */ | |
fd796e41 | 396 | if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) |
4d1114c0 PM |
397 | continue; |
398 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
fd796e41 PM |
399 | // Should we shrink down to a single callback queue? |
400 | n = rcu_segcblist_n_cbs(&rtpcp->cblist); | |
401 | if (n) { | |
402 | ncbs += n; | |
403 | if (cpu > 0) | |
404 | ncbsnz += n; | |
405 | } | |
4d1114c0 PM |
406 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
407 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
408 | if (rcu_segcblist_pend_cbs(&rtpcp->cblist)) | |
409 | needgpcb |= 0x3; | |
410 | if (!rcu_segcblist_empty(&rtpcp->cblist)) | |
411 | needgpcb |= 0x1; | |
412 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
413 | } | |
fd796e41 PM |
414 | |
415 | // Shrink down to a single callback queue if appropriate. | |
416 | // This is done in two stages: (1) If there are no more than | |
417 | // rcu_task_collapse_lim callbacks on CPU 0 and none on any other | |
418 | // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, | |
419 | // if there has not been an increase in callbacks, limit dequeuing | |
420 | // to CPU 0. Note the matching RCU read-side critical section in | |
421 | // call_rcu_tasks_generic(). | |
422 | if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { | |
423 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
424 | if (rtp->percpu_enqueue_lim > 1) { | |
2bcd18e0 | 425 | WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); |
fd796e41 PM |
426 | smp_store_release(&rtp->percpu_enqueue_lim, 1); |
427 | rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); | |
428 | pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); | |
429 | } | |
430 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
431 | } | |
432 | if (rcu_task_cb_adjust && !ncbsnz && | |
433 | poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) { | |
434 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
435 | if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { | |
436 | WRITE_ONCE(rtp->percpu_dequeue_lim, 1); | |
437 | pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); | |
438 | } | |
4cf0585c PM |
439 | for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { |
440 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
441 | ||
442 | WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); | |
443 | } | |
fd796e41 PM |
444 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); |
445 | } | |
446 | ||
4d1114c0 PM |
447 | return needgpcb; |
448 | } | |
449 | ||
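// The value returned above is a small bitmask: 0x1 means that some queue is
// non-empty (callbacks will eventually need invoking) and 0x2 means that
// some callbacks are still waiting for a grace period. A minimal sketch of
// a consumer, modeled on rcu_tasks_one_gp() below:
//
//	needgpcb = rcu_tasks_need_gpcb(rtp);
//	if (needgpcb & 0x2) {
//		rcu_seq_start(&rtp->tasks_gp_seq);
//		rtp->gp_func(rtp);	// Drive one full grace period.
//		rcu_seq_end(&rtp->tasks_gp_seq);
//	}
//	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));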
57881863 | 450 | // Advance callbacks and invoke any that are ready. |
d363f833 | 451 | static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) |
eacd6f04 | 452 | { |
57881863 | 453 | int cpu; |
d363f833 | 454 | int cpunext; |
eacd6f04 | 455 | unsigned long flags; |
9b073de1 | 456 | int len; |
9b073de1 | 457 | struct rcu_head *rhp; |
d363f833 PM |
458 | struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); |
459 | struct rcu_tasks_percpu *rtpcp_next; | |
460 | ||
461 | cpu = rtpcp->cpu; | |
462 | cpunext = cpu * 2 + 1; | |
2cee0789 | 463 | if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
d363f833 PM |
464 | rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); |
465 | queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); | |
466 | cpunext++; | |
2cee0789 | 467 | if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
d363f833 PM |
468 | rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); |
469 | queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work); | |
57881863 | 470 | } |
57881863 | 471 | } |
d363f833 | 472 | |
ab2756ea | 473 | if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu)) |
d363f833 PM |
474 | return; |
475 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
476 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); | |
477 | rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); | |
478 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
479 | len = rcl.len; | |
480 | for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { | |
481 | local_bh_disable(); | |
482 | rhp->func(rhp); | |
483 | local_bh_enable(); | |
484 | cond_resched(); | |
485 | } | |
486 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
487 | rcu_segcblist_add_len(&rtpcp->cblist, -len); | |
488 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
489 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
490 | } | |
491 | ||
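// The callback-invocation work above fans out as a binary tree: the handler
// for CPU 0's queue kicks the queues for CPUs 1 and 2 (cpu * 2 + 1 and
// cpu * 2 + 2), CPU 1's handler kicks CPUs 3 and 4, CPU 2's kicks CPUs 5 and
// 6, and so on, so all N in-use queues are reached in roughly log2(N) rounds
// instead of one CPU walking every queue serially.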
492 | // Workqueue flood to advance callbacks and invoke any that are ready. | |
493 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) | |
494 | { | |
495 | struct rcu_tasks *rtp; | |
496 | struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); | |
497 | ||
498 | rtp = rtpcp->rtpp; | |
499 | rcu_tasks_invoke_cbs(rtp, rtpcp); | |
57881863 PM |
500 | } |
501 | ||
d96225fd | 502 | // Wait for one grace period. |
4a8cc433 | 503 | static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) |
57881863 PM |
504 | { |
505 | int needgpcb; | |
d96225fd PM |
506 | |
507 | mutex_lock(&rtp->tasks_gp_mutex); | |
d96225fd PM |
508 | |
509 | // Wait for newly arrived callbacks needing a grace period, unless this is a mid-boot call, which forces a grace period. |
4a8cc433 PM |
510 | if (unlikely(midboot)) { |
511 | needgpcb = 0x2; | |
512 | } else { | |
513 | set_tasks_gp_state(rtp, RTGS_WAIT_CBS); | |
514 | rcuwait_wait_event(&rtp->cbs_wait, | |
515 | (needgpcb = rcu_tasks_need_gpcb(rtp)), | |
516 | TASK_IDLE); | |
517 | } | |
d96225fd PM |
518 | |
519 | if (needgpcb & 0x2) { | |
520 | // Wait for one grace period. | |
521 | set_tasks_gp_state(rtp, RTGS_WAIT_GP); | |
522 | rtp->gp_start = jiffies; | |
523 | rcu_seq_start(&rtp->tasks_gp_seq); | |
524 | rtp->gp_func(rtp); | |
525 | rcu_seq_end(&rtp->tasks_gp_seq); | |
526 | } | |
527 | ||
528 | // Invoke callbacks. | |
529 | set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); | |
530 | rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); | |
531 | mutex_unlock(&rtp->tasks_gp_mutex); | |
532 | } | |
533 | ||
534 | // RCU-tasks kthread that detects grace periods and invokes callbacks. | |
535 | static int __noreturn rcu_tasks_kthread(void *arg) | |
536 | { | |
07e10515 | 537 | struct rcu_tasks *rtp = arg; |
eacd6f04 PM |
538 | |
539 | /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ | |
04d4e665 | 540 | housekeeping_affine(current, HK_TYPE_RCU); |
07e10515 | 541 | WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! |
eacd6f04 PM |
542 | |
543 | /* | |
544 | * Each pass through the following loop makes one check for | |
545 | * newly arrived callbacks, and, if there are some, waits for | |
546 | * one RCU-tasks grace period and then invokes the callbacks. | |
547 | * This loop is terminated by the system going down. ;-) | |
548 | */ | |
549 | for (;;) { | |
d96225fd PM |
550 | // Wait for one grace period and invoke any callbacks |
551 | // that are ready. | |
4a8cc433 | 552 | rcu_tasks_one_gp(rtp, false); |
57881863 | 553 | |
d96225fd | 554 | // Paranoid sleep to keep this from entering a tight loop. |
4fe192df | 555 | schedule_timeout_idle(rtp->gp_sleep); |
eacd6f04 PM |
556 | } |
557 | } | |
558 | ||
68cb4720 PM |
559 | // Wait for a grace period for the specified flavor of Tasks RCU. |
560 | static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) | |
561 | { | |
562 | /* Complain if the scheduler has not started. */ | |
fcd53c8a | 563 | WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, |
68cb4720 PM |
564 | "synchronize_rcu_tasks called too soon"); |
565 | ||
4a8cc433 PM |
566 | // If the grace-period kthread is running, use it. |
567 | if (READ_ONCE(rtp->kthread_ptr)) { | |
568 | wait_rcu_gp(rtp->call_func); | |
569 | return; | |
570 | } | |
571 | rcu_tasks_one_gp(rtp, true); | |
68cb4720 PM |
572 | } |
573 | ||
1b04fa99 | 574 | /* Spawn RCU-tasks grace-period kthread. */ |
5873b8a9 | 575 | static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) |
eacd6f04 PM |
576 | { |
577 | struct task_struct *t; | |
578 | ||
c97d12a6 PM |
579 | t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); |
580 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) | |
5873b8a9 | 581 | return; |
eacd6f04 | 582 | smp_mb(); /* Ensure others see full kthread. */ |
eacd6f04 | 583 | } |
eacd6f04 | 584 | |
eacd6f04 PM |
585 | #ifndef CONFIG_TINY_RCU |
586 | ||
587 | /* | |
588 | * Print any non-default Tasks RCU settings. | |
589 | */ | |
590 | static void __init rcu_tasks_bootup_oddness(void) | |
591 | { | |
d5f177d3 | 592 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
f2539003 PM |
593 | int rtsimc; |
594 | ||
eacd6f04 PM |
595 | if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) |
596 | pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); | |
f2539003 PM |
597 | rtsimc = clamp(rcu_task_stall_info_mult, 1, 10); |
598 | if (rtsimc != rcu_task_stall_info_mult) { | |
599 | pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc); | |
600 | rcu_task_stall_info_mult = rtsimc; | |
601 | } | |
d5f177d3 PM |
602 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
603 | #ifdef CONFIG_TASKS_RCU | |
604 | pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); | |
eacd6f04 | 605 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
606 | #ifdef CONFIG_TASKS_RUDE_RCU |
607 | pr_info("\tRude variant of Tasks RCU enabled.\n"); | |
608 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
609 | #ifdef CONFIG_TASKS_TRACE_RCU |
610 | pr_info("\tTracing variant of Tasks RCU enabled.\n"); | |
611 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ | |
eacd6f04 PM |
612 | } |
613 | ||
614 | #endif /* #ifndef CONFIG_TINY_RCU */ | |
5873b8a9 | 615 | |
8344496e | 616 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
617 | /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ |
618 | static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) | |
619 | { | |
10b3742f PM |
620 | int cpu; |
621 | bool havecbs = false; | |
622 | ||
623 | for_each_possible_cpu(cpu) { | |
624 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
625 | ||
626 | if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) { | |
627 | havecbs = true; | |
628 | break; | |
629 | } | |
630 | } | |
7e0669c3 | 631 | pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n", |
e21408ce | 632 | rtp->kname, |
7e0669c3 | 633 | tasks_gp_state_getname(rtp), data_race(rtp->gp_state), |
af051ca4 | 634 | jiffies - data_race(rtp->gp_jiffies), |
b14fb4fb | 635 | data_race(rcu_seq_current(&rtp->tasks_gp_seq)), |
7e0669c3 | 636 | data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), |
e21408ce | 637 | ".k"[!!data_race(rtp->kthread_ptr)], |
10b3742f | 638 | ".C"[havecbs], |
e21408ce PM |
639 | s); |
640 | } | |
27c0f144 | 641 | #endif // #ifndef CONFIG_TINY_RCU |
e21408ce | 642 | |
25246fc8 PM |
643 | static void exit_tasks_rcu_finish_trace(struct task_struct *t); |
644 | ||
645 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) | |
5873b8a9 | 646 | |
d01aa263 PM |
647 | //////////////////////////////////////////////////////////////////////// |
648 | // | |
649 | // Shared code between task-list-scanning variants of Tasks RCU. | |
650 | ||
651 | /* Wait for one RCU-tasks grace period. */ | |
652 | static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) | |
653 | { | |
f2539003 | 654 | struct task_struct *g; |
d01aa263 | 655 | int fract; |
f2539003 PM |
656 | LIST_HEAD(holdouts); |
657 | unsigned long j; | |
658 | unsigned long lastinfo; | |
659 | unsigned long lastreport; | |
660 | bool reported = false; | |
661 | int rtsi; | |
662 | struct task_struct *t; | |
d01aa263 | 663 | |
af051ca4 | 664 | set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); |
7460ade1 | 665 | rtp->pregp_func(&holdouts); |
d01aa263 PM |
666 | |
667 | /* | |
668 | * There were callbacks, so we need to wait for an RCU-tasks | |
669 | * grace period. Start off by scanning the task list for tasks | |
670 | * that are not already voluntarily blocked. Mark these tasks | |
671 | * and make a list of them in holdouts. | |
672 | */ | |
af051ca4 | 673 | set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); |
1a4a8153 PM |
674 | if (rtp->pertask_func) { |
675 | rcu_read_lock(); | |
676 | for_each_process_thread(g, t) | |
677 | rtp->pertask_func(t, &holdouts); | |
678 | rcu_read_unlock(); | |
679 | } | |
d01aa263 | 680 | |
af051ca4 | 681 | set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); |
9796e1ae | 682 | rtp->postscan_func(&holdouts); |
d01aa263 PM |
683 | |
684 | /* | |
685 | * Each pass through the following loop scans the list of holdout | |
686 | * tasks, removing any that are no longer holdouts. When the list | |
687 | * is empty, we are done. | |
688 | */ | |
689 | lastreport = jiffies; | |
f2539003 PM |
690 | lastinfo = lastreport; |
691 | rtsi = READ_ONCE(rcu_task_stall_info); | |
d01aa263 | 692 | |
2393a613 PM |
693 | // Start off with initial wait and slowly back off to 1 HZ wait. |
694 | fract = rtp->init_fract; | |
d01aa263 | 695 | |
77dc1741 | 696 | while (!list_empty(&holdouts)) { |
777570d9 | 697 | ktime_t exp; |
d01aa263 PM |
698 | bool firstreport; |
699 | bool needreport; | |
700 | int rtst; | |
701 | ||
f2539003 | 702 | // Slowly back off waiting for holdouts |
af051ca4 | 703 | set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); |
bddf7122 PM |
704 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { |
705 | schedule_timeout_idle(fract); | |
706 | } else { | |
707 | exp = jiffies_to_nsecs(fract); | |
708 | __set_current_state(TASK_IDLE); | |
709 | schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD); | |
710 | } | |
d01aa263 | 711 | |
75dc2da5 PM |
712 | if (fract < HZ) |
713 | fract++; | |
d01aa263 PM |
714 | |
715 | rtst = READ_ONCE(rcu_task_stall_timeout); | |
716 | needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); | |
f2539003 | 717 | if (needreport) { |
d01aa263 | 718 | lastreport = jiffies; |
f2539003 PM |
719 | reported = true; |
720 | } | |
d01aa263 PM |
721 | firstreport = true; |
722 | WARN_ON(signal_pending(current)); | |
af051ca4 | 723 | set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); |
d01aa263 | 724 | rtp->holdouts_func(&holdouts, needreport, &firstreport); |
f2539003 PM |
725 | |
726 | // Print pre-stall informational messages if needed. | |
727 | j = jiffies; | |
728 | if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) { | |
729 | lastinfo = j; | |
730 | rtsi = rtsi * rcu_task_stall_info_mult; | |
731 | pr_info("%s: %s grace period %lu is %lu jiffies old.\n", | |
732 | __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); | |
733 | } | |
d01aa263 PM |
734 | } |
735 | ||
af051ca4 PM |
736 | set_tasks_gp_state(rtp, RTGS_POST_GP); |
737 | rtp->postgp_func(rtp); | |
d01aa263 PM |
738 | } |
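// Worked example of the backoff above: the classic Tasks RCU flavor sets
// ->init_fract to HZ / 10 in rcu_spawn_tasks_kthread() below, so the first
// holdout scan happens roughly 100 milliseconds after the task-list scan,
// and each subsequent wait grows by one jiffy until it saturates at HZ,
// that is, about one second between scans of a long-lived holdout list.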
739 | ||
25246fc8 PM |
740 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
741 | ||
742 | #ifdef CONFIG_TASKS_RCU | |
743 | ||
5873b8a9 PM |
744 | //////////////////////////////////////////////////////////////////////// |
745 | // | |
746 | // Simple variant of RCU whose quiescent states are voluntary context | |
8af9e2c7 | 747 | // switches, cond_resched_tasks_rcu_qs(), user-space execution, and idle. |
5873b8a9 PM |
748 | // As such, grace periods can take one good long time. There are no |
749 | // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() | |
750 | // because this implementation is intended to get the system into a safe | |
751 | // state for some of the manipulations involved in tracing and the like. | |
752 | // Finally, this implementation does not support high call_rcu_tasks() | |
753 | // rates from multiple CPUs. If this is required, per-CPU callback lists | |
754 | // will be needed. | |
06a3ec92 PM |
755 | // |
756 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
757 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() | |
758 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
759 | // invokes these functions in this order: | |
760 | // | |
761 | // rcu_tasks_pregp_step(): | |
762 | // Invokes synchronize_rcu() in order to wait for all in-flight | |
763 | // t->on_rq and t->nvcsw transitions to complete. This works because | |
764 | // all such transitions are carried out with interrupts disabled. | |
765 | // rcu_tasks_pertask(), invoked on every non-idle task: | |
766 | // For every runnable non-idle task other than the current one, use | |
767 | // get_task_struct() to pin down that task, snapshot that task's | |
768 | // number of voluntary context switches, and add that task to the | |
769 | // holdout list. | |
770 | // rcu_tasks_postscan(): | |
771 | // Invoke synchronize_srcu() to ensure that all tasks that were | |
772 | // in the process of exiting (and which thus might not know to | |
773 | // synchronize with this RCU Tasks grace period) have completed | |
774 | // exiting. | |
775 | // check_all_holdout_tasks(), repeatedly until holdout list is empty: | |
776 | // Scans the holdout list, attempting to identify a quiescent state | |
777 | // for each task on the list. If there is a quiescent state, the | |
778 | // corresponding task is removed from the holdout list. | |
779 | // rcu_tasks_postgp(): | |
780 | // Invokes synchronize_rcu() in order to ensure that all prior | |
781 | // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks | |
782 | // to have happened before the end of this RCU Tasks grace period. | |
783 | // Again, this works because all such transitions are carried out | |
784 | // with interrupts disabled. | |
785 | // | |
786 | // For each exiting task, the exit_tasks_rcu_start() and | |
787 | // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU | |
788 | // read-side critical sections waited for by rcu_tasks_postscan(). | |
789 | // | |
381a4f3b PM |
790 | // Pre-grace-period update-side code is ordered before the grace period |
791 | // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code |
792 | // is ordered before the grace period via synchronize_rcu() call in | |
793 | // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt | |
06a3ec92 | 794 | // disabling. |
5873b8a9 | 795 | |
e4fe5dd6 | 796 | /* Pre-grace-period preparation. */ |
7460ade1 | 797 | static void rcu_tasks_pregp_step(struct list_head *hop) |
e4fe5dd6 PM |
798 | { |
799 | /* | |
800 | * Wait for all pre-existing t->on_rq and t->nvcsw transitions | |
801 | * to complete. Invoking synchronize_rcu() suffices because all | |
802 | * these transitions occur with interrupts disabled. Without this | |
803 | * synchronize_rcu(), a read-side critical section that started | |
804 | * before the grace period might be incorrectly seen as having | |
805 | * started after the grace period. | |
806 | * | |
807 | * This synchronize_rcu() also dispenses with the need for a | |
808 | * memory barrier on the first store to t->rcu_tasks_holdout, | |
809 | * as it forces the store to happen after the beginning of the | |
810 | * grace period. | |
811 | */ | |
812 | synchronize_rcu(); | |
813 | } | |
814 | ||
815 | /* Per-task initial processing. */ | |
816 | static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) | |
817 | { | |
818 | if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { | |
819 | get_task_struct(t); | |
820 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); | |
821 | WRITE_ONCE(t->rcu_tasks_holdout, true); | |
822 | list_add(&t->rcu_tasks_holdout_list, hop); | |
823 | } | |
824 | } | |
825 | ||
826 | /* Processing between scanning the task list and draining the holdout list. */ |
04a3c5aa | 827 | static void rcu_tasks_postscan(struct list_head *hop) |
e4fe5dd6 PM |
828 | { |
829 | /* | |
830 | * Wait for tasks that are in the process of exiting. This | |
831 | * does only part of the job, ensuring that all tasks that were | |
832 | * previously exiting reach the point where they have disabled | |
833 | * preemption, allowing the later synchronize_rcu() to finish | |
834 | * the job. | |
835 | */ | |
836 | synchronize_srcu(&tasks_rcu_exit_srcu); | |
837 | } | |
838 | ||
5873b8a9 PM |
839 | /* See if tasks are still holding out, complain if so. */ |
840 | static void check_holdout_task(struct task_struct *t, | |
841 | bool needreport, bool *firstreport) | |
842 | { | |
843 | int cpu; | |
844 | ||
845 | if (!READ_ONCE(t->rcu_tasks_holdout) || | |
846 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || | |
847 | !READ_ONCE(t->on_rq) || | |
848 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && | |
849 | !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { | |
850 | WRITE_ONCE(t->rcu_tasks_holdout, false); | |
851 | list_del_init(&t->rcu_tasks_holdout_list); | |
852 | put_task_struct(t); | |
853 | return; | |
854 | } | |
855 | rcu_request_urgent_qs_task(t); | |
856 | if (!needreport) | |
857 | return; | |
858 | if (*firstreport) { | |
859 | pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); | |
860 | *firstreport = false; | |
861 | } | |
862 | cpu = task_cpu(t); | |
863 | pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", | |
864 | t, ".I"[is_idle_task(t)], | |
865 | "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], | |
866 | t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, | |
867 | t->rcu_tasks_idle_cpu, cpu); | |
868 | sched_show_task(t); | |
869 | } | |
870 | ||
e4fe5dd6 PM |
871 | /* Scan the holdout lists for tasks no longer holding out. */ |
872 | static void check_all_holdout_tasks(struct list_head *hop, | |
873 | bool needreport, bool *firstreport) | |
874 | { | |
875 | struct task_struct *t, *t1; | |
876 | ||
877 | list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { | |
878 | check_holdout_task(t, needreport, firstreport); | |
879 | cond_resched(); | |
880 | } | |
881 | } | |
882 | ||
883 | /* Finish off the Tasks-RCU grace period. */ | |
af051ca4 | 884 | static void rcu_tasks_postgp(struct rcu_tasks *rtp) |
e4fe5dd6 PM |
885 | { |
886 | /* | |
887 | * Because ->on_rq and ->nvcsw are not guaranteed to have full |
888 | * memory barriers prior to them in the schedule() path, memory | |
889 | * reordering on other CPUs could cause their RCU-tasks read-side | |
890 | * critical sections to extend past the end of the grace period. | |
891 | * However, because these ->nvcsw updates are carried out with | |
892 | * interrupts disabled, we can use synchronize_rcu() to force the | |
893 | * needed ordering on all such CPUs. | |
894 | * | |
895 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout | |
896 | * accesses to be within the grace period, avoiding the need for | |
897 | * memory barriers for ->rcu_tasks_holdout accesses. | |
898 | * | |
899 | * In addition, this synchronize_rcu() waits for exiting tasks | |
900 | * to complete their final preempt_disable() region of execution, | |
901 | * cleaning up after the synchronize_srcu() above. | |
902 | */ | |
903 | synchronize_rcu(); | |
904 | } | |
905 | ||
5873b8a9 | 906 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
c97d12a6 | 907 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); |
5873b8a9 PM |
908 | |
909 | /** | |
910 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period |
911 | * @rhp: structure to be used for queueing the RCU updates. | |
912 | * @func: actual callback function to be invoked after the grace period | |
913 | * | |
914 | * The callback function will be invoked some time after a full grace | |
915 | * period elapses, in other words after all currently executing RCU | |
916 | * read-side critical sections have completed. call_rcu_tasks() assumes | |
917 | * that the read-side critical sections end at a voluntary context | |
8af9e2c7 | 918 | * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, |
5873b8a9 PM |
919 | * or transition to usermode execution. As such, there are no read-side |
920 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
921 | * this primitive is intended to determine that all tasks have passed | |
a616aec9 | 922 | * through a safe state, not so much for data-structure synchronization. |
5873b8a9 PM |
923 | * |
924 | * See the description of call_rcu() for more detailed information on | |
925 | * memory ordering guarantees. | |
926 | */ | |
927 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) | |
928 | { | |
929 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); | |
930 | } | |
931 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | |
932 | ||
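// Hypothetical usage sketch (the structure and helpers below are
// illustrative, not part of this file): a tracer that redirects calls
// through a dynamically allocated trampoline can free the old trampoline
// only after every task has passed through a voluntary context switch,
// because no task voluntarily sleeps while executing the trampoline:
//
//	struct my_tramp {
//		struct rcu_head rh;
//		char insns[64];
//	};
//
//	static void my_tramp_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_tramp, rh));
//	}
//
//	/* After unpatching every call site that could enter tp->insns: */
//	call_rcu_tasks(&tp->rh, my_tramp_free_cb);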
933 | /** | |
934 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. | |
935 | * | |
936 | * Control will return to the caller some time after a full rcu-tasks | |
937 | * grace period has elapsed, in other words after all currently | |
938 | * executing rcu-tasks read-side critical sections have completed. These |
939 | * read-side critical sections are delimited by calls to schedule(), | |
940 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls | |
941 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). | |
942 | * | |
943 | * This is a very specialized primitive, intended only for a few uses in | |
944 | * tracing and other situations requiring manipulation of function | |
945 | * preambles and profiling hooks. The synchronize_rcu_tasks() function | |
946 | * is not (yet) intended for heavy use from multiple CPUs. | |
947 | * | |
948 | * See the description of synchronize_rcu() for more detailed information | |
949 | * on memory ordering guarantees. | |
950 | */ | |
951 | void synchronize_rcu_tasks(void) | |
952 | { | |
953 | synchronize_rcu_tasks_generic(&rcu_tasks); | |
954 | } | |
955 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); | |
956 | ||
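// Hypothetical synchronous counterpart of the sketch above (helper names
// are illustrative): if the updater may block, the old trampoline can be
// freed directly once the grace period has elapsed:
//
//	unpatch_all_call_sites(tp);	/* hypothetical helper */
//	synchronize_rcu_tasks();	/* all tasks have since switched voluntarily */
//	kfree(tp);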
957 | /** | |
958 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. | |
959 | * | |
960 | * Although the current implementation is guaranteed to wait, it is not | |
961 | * obligated to, for example, if there are no pending callbacks. | |
962 | */ | |
963 | void rcu_barrier_tasks(void) | |
964 | { | |
ce9b1c66 | 965 | rcu_barrier_tasks_generic(&rcu_tasks); |
5873b8a9 PM |
966 | } |
967 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); | |
968 | ||
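// Hypothetical module-teardown sketch (names are illustrative): a module
// that has queued callbacks with call_rcu_tasks() must wait for them to be
// invoked before its code and data go away:
//
//	static void __exit my_tracer_exit(void)
//	{
//		my_tracer_unregister();	/* stop queueing new callbacks */
//		rcu_barrier_tasks();	/* wait for already-queued callbacks */
//	}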
969 | static int __init rcu_spawn_tasks_kthread(void) | |
970 | { | |
cafafd67 | 971 | cblist_init_generic(&rcu_tasks); |
4fe192df | 972 | rcu_tasks.gp_sleep = HZ / 10; |
75dc2da5 | 973 | rcu_tasks.init_fract = HZ / 10; |
e4fe5dd6 PM |
974 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
975 | rcu_tasks.pertask_func = rcu_tasks_pertask; | |
976 | rcu_tasks.postscan_func = rcu_tasks_postscan; | |
977 | rcu_tasks.holdouts_func = check_all_holdout_tasks; | |
978 | rcu_tasks.postgp_func = rcu_tasks_postgp; | |
5873b8a9 PM |
979 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
980 | return 0; | |
981 | } | |
5873b8a9 | 982 | |
27c0f144 PM |
983 | #if !defined(CONFIG_TINY_RCU) |
984 | void show_rcu_tasks_classic_gp_kthread(void) | |
e21408ce PM |
985 | { |
986 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); | |
987 | } | |
27c0f144 PM |
988 | EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); |
989 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 990 | |
25246fc8 PM |
991 | /* Do the srcu_read_lock() for the above synchronize_srcu(). */ |
992 | void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) | |
993 | { | |
994 | preempt_disable(); | |
995 | current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); | |
996 | preempt_enable(); | |
997 | } | |
998 | ||
999 | /* Do the srcu_read_unlock() for the above synchronize_srcu(). */ | |
1000 | void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) | |
1001 | { | |
1002 | struct task_struct *t = current; | |
1003 | ||
1004 | preempt_disable(); | |
1005 | __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); | |
1006 | preempt_enable(); | |
1007 | exit_tasks_rcu_finish_trace(t); | |
1008 | } | |
1009 | ||
e21408ce | 1010 | #else /* #ifdef CONFIG_TASKS_RCU */ |
25246fc8 PM |
1011 | void exit_tasks_rcu_start(void) { } |
1012 | void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } | |
e21408ce | 1013 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
1014 | |
1015 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1016 | ||
1017 | //////////////////////////////////////////////////////////////////////// | |
1018 | // | |
1019 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of | |
1020 | // passing an empty function to schedule_on_each_cpu(). This approach | |
e4be1f44 PM |
1021 | // provides an asynchronous call_rcu_tasks_rude() API and batching of |
1022 | // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. | |
9fc98e31 PM |
1023 | // This invokes schedule_on_each_cpu() in order to send IPIs far and wide |
1024 | // and induces otherwise unnecessary context switches on all online CPUs, | |
1025 | // whether idle or not. | |
1026 | // | |
1027 | // Callback handling is provided by the rcu_tasks_kthread() function. | |
1028 | // | |
1029 | // Ordering is provided by the scheduler's context-switch code. | |
c84aad76 PM |
1030 | |
1031 | // Empty function to allow workqueues to force a context switch. | |
1032 | static void rcu_tasks_be_rude(struct work_struct *work) | |
1033 | { | |
1034 | } | |
1035 | ||
1036 | // Wait for one rude RCU-tasks grace period. | |
1037 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) | |
1038 | { | |
f75fd4b9 PS |
1039 | if (num_online_cpus() <= 1) |
1040 | return; // Fastpath for only one CPU. | |
1041 | ||
238dbce3 | 1042 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
c84aad76 PM |
1043 | schedule_on_each_cpu(rcu_tasks_be_rude); |
1044 | } | |
1045 | ||
1046 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); | |
c97d12a6 PM |
1047 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
1048 | "RCU Tasks Rude"); | |
c84aad76 PM |
1049 | |
1050 | /** | |
1051 | * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period |
1052 | * @rhp: structure to be used for queueing the RCU updates. | |
1053 | * @func: actual callback function to be invoked after the grace period | |
1054 | * | |
1055 | * The callback function will be invoked some time after a full grace | |
1056 | * period elapses, in other words after all currently executing RCU | |
1057 | * read-side critical sections have completed. call_rcu_tasks_rude() | |
1058 | * assumes that the read-side critical sections end at context switch, | |
8af9e2c7 | 1059 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
a6517e9c NU |
1060 | * usermode execution is schedulable). As such, there are no read-side |
1061 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
1062 | * this primitive is intended to determine that all tasks have passed | |
1063 | * through a safe state, not so much for data-structure synchronization. | |
c84aad76 PM |
1064 | * |
1065 | * See the description of call_rcu() for more detailed information on | |
1066 | * memory ordering guarantees. | |
1067 | */ | |
1068 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) | |
1069 | { | |
1070 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); | |
1071 | } | |
1072 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); | |
1073 | ||
1074 | /** | |
1075 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period | |
1076 | * | |
1077 | * Control will return to the caller some time after a rude rcu-tasks | |
1078 | * grace period has elapsed, in other words after all currently | |
1079 | * executing rcu-tasks read-side critical sections have completed. These |
1080 | * read-side critical sections are delimited by calls to schedule(), | |
a6517e9c NU |
1081 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
1082 | * context), and (in theory, anyway) cond_resched(). | |
c84aad76 PM |
1083 | * |
1084 | * This is a very specialized primitive, intended only for a few uses in | |
1085 | * tracing and other situations requiring manipulation of function preambles | |
1086 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not | |
1087 | * (yet) intended for heavy use from multiple CPUs. | |
1088 | * | |
1089 | * See the description of synchronize_rcu() for more detailed information | |
1090 | * on memory ordering guarantees. | |
1091 | */ | |
1092 | void synchronize_rcu_tasks_rude(void) | |
1093 | { | |
1094 | synchronize_rcu_tasks_generic(&rcu_tasks_rude); | |
1095 | } | |
1096 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); | |
1097 | ||
1098 | /** | |
1099 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. | |
1100 | * | |
1101 | * Although the current implementation is guaranteed to wait, it is not | |
1102 | * obligated to, for example, if there are no pending callbacks. | |
1103 | */ | |
1104 | void rcu_barrier_tasks_rude(void) | |
1105 | { | |
ce9b1c66 | 1106 | rcu_barrier_tasks_generic(&rcu_tasks_rude); |
c84aad76 PM |
1107 | } |
1108 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); | |
1109 | ||
1110 | static int __init rcu_spawn_tasks_rude_kthread(void) | |
1111 | { | |
cafafd67 | 1112 | cblist_init_generic(&rcu_tasks_rude); |
4fe192df | 1113 | rcu_tasks_rude.gp_sleep = HZ / 10; |
c84aad76 PM |
1114 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); |
1115 | return 0; | |
1116 | } | |
c84aad76 | 1117 | |
27c0f144 PM |
1118 | #if !defined(CONFIG_TINY_RCU) |
1119 | void show_rcu_tasks_rude_gp_kthread(void) | |
e21408ce PM |
1120 | { |
1121 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); | |
1122 | } | |
27c0f144 PM |
1123 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
1124 | #endif // !defined(CONFIG_TINY_RCU) | |
1125 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
1126 | |
1127 | //////////////////////////////////////////////////////////////////////// | |
1128 | // | |
1129 | // Tracing variant of Tasks RCU. This variant is designed to be used | |
1130 | // to protect tracing hooks, including those of BPF. This variant | |
1131 | // therefore: | |
1132 | // | |
1133 | // 1. Has explicit read-side markers to allow finite grace periods | |
1134 | // in the face of in-kernel loops for PREEMPT=n builds. | |
1135 | // | |
1136 | // 2. Protects code in the idle loop, exception entry/exit, and | |
1137 | // CPU-hotplug code paths, similar to the capabilities of SRCU. | |
1138 | // | |
c4f113ac | 1139 | // 3. Avoids expensive read-side instructions, having overhead similar |
d5f177d3 PM |
1140 | // to that of Preemptible RCU. |
1141 | // | |
eea3423b PM |
1142 | // There are of course downsides. For example, the grace-period code |
1143 | // can send IPIs to CPUs, even when those CPUs are in the idle loop or | |
1144 | // in nohz_full userspace. If needed, these downsides can be at least | |
1145 | // partially remedied. | |
d5f177d3 PM |
1146 | // |
1147 | // Perhaps most important, this variant of RCU does not affect the vanilla | |
1148 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace | |
1149 | // readers can operate from idle, offline, and exception entry/exit in no | |
1150 | // way allows rcu_preempt and rcu_sched readers to also do so. | |
a434dd10 PM |
1151 | // |
1152 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
1153 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() | |
1154 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
1155 | // invokes these functions in this order: | |
1156 | // | |
1157 | // rcu_tasks_trace_pregp_step(): | |
eea3423b PM |
1158 | // Disables CPU hotplug, adds all currently executing tasks to the |
1159 | // holdout list, then checks the state of all tasks that blocked | |
1160 | // or were preempted within their current RCU Tasks Trace read-side | |
1161 | // critical section, adding them to the holdout list if appropriate. | |
1162 | // Finally, this function re-enables CPU hotplug. | |
1163 | // The ->pertask_func() pointer is NULL, so there is no per-task processing. | |
a434dd10 | 1164 | // rcu_tasks_trace_postscan(): |
eea3423b PM |
1165 | // Invokes synchronize_rcu() to wait for late-stage exiting tasks |
1166 | // to finish exiting. | |
a434dd10 PM |
1167 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: |
1168 | // Scans the holdout list, attempting to identify a quiescent state | |
1169 | // for each task on the list. If there is a quiescent state, the | |
eea3423b PM |
1170 | // corresponding task is removed from the holdout list. Once this |
1171 | // list is empty, the grace period has completed. | |
a434dd10 | 1172 | // rcu_tasks_trace_postgp(): |
eea3423b | 1173 | // Provides the needed full memory barrier and does debug checks. |
a434dd10 PM |
1174 | // |
1175 | // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. | |
1176 | // | |
eea3423b PM |
1177 | // Pre-grace-period update-side code is ordered before the grace period |
1178 | // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period | |
1179 | // read-side code is ordered before the grace period by atomic operations | |
1180 | // on .b.need_qs flag of each task involved in this process, or by scheduler | |
1181 | // context-switch ordering (for locked-down non-running readers). | |
d5f177d3 PM |
1182 | |
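// As a rough usage sketch (not part of this file; my_trace_hook, hook, and
// my_hook_arg are illustrative names), a reader protecting a tracing hook
// with this flavor might look like this:
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference(my_trace_hook);
//	if (hook)
//		hook->func(my_hook_arg);
//	rcu_read_unlock_trace();
//
// The corresponding updater unpublishes the hook pointer and then uses
// either synchronize_rcu_tasks_trace() or call_rcu_tasks_trace() before
// freeing the old structure.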
1183 | // The lockdep state must be outside of #ifdef to be useful. | |
1184 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
1185 | static struct lock_class_key rcu_lock_trace_key; | |
1186 | struct lockdep_map rcu_trace_lock_map = | |
1187 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); | |
1188 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); | |
1189 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
1190 | ||
1191 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1192 | ||
d5f177d3 PM |
1193 | // Record outstanding IPIs to each CPU. No point in sending two... |
1194 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); | |
1195 | ||
40471509 PM |
1196 | // The number of detections of task quiescent states relying on |
1197 | // heavyweight readers executing explicit memory barriers. | |
6731da9e PM |
1198 | static unsigned long n_heavy_reader_attempts; |
1199 | static unsigned long n_heavy_reader_updates; | |
1200 | static unsigned long n_heavy_reader_ofl_updates; | |
ffcc21a3 | 1201 | static unsigned long n_trc_holdouts; |
40471509 | 1202 | |
b0afa0f0 PM |
1203 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
1204 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, | |
1205 | "RCU Tasks Trace"); | |
1206 | ||
3847b645 PM |
1207 | /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ |
1208 | static u8 rcu_ld_need_qs(struct task_struct *t) | |
1209 | { | |
1210 | smp_mb(); // Enforce full grace-period ordering. | |
1211 | return smp_load_acquire(&t->trc_reader_special.b.need_qs); | |
1212 | } | |
1213 | ||
1214 | /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ | |
1215 | static void rcu_st_need_qs(struct task_struct *t, u8 v) | |
1216 | { | |
1217 | smp_store_release(&t->trc_reader_special.b.need_qs, v); | |
1218 | smp_mb(); // Enforce full grace-period ordering. | |
1219 | } | |
1220 | ||
1221 | /* | |
1222 | * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for | |
1223 | * the four-byte operand-size restriction of some platforms. | |
1224 | * Returns the old value, which is often ignored. | |
1225 | */ | |
1226 | u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) | |
1227 | { | |
1228 | union rcu_special ret; | |
1229 | union rcu_special trs_old = READ_ONCE(t->trc_reader_special); | |
1230 | union rcu_special trs_new = trs_old; | |
1231 | ||
1232 | if (trs_old.b.need_qs != old) | |
1233 | return trs_old.b.need_qs; | |
1234 | trs_new.b.need_qs = new; | |
1235 | ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); | |
1236 | return ret.b.need_qs; | |
1237 | } | |
1238 | EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); | |
1239 | ||
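// As a sketch of the handshake implemented later in this file (not
// additional code), the grace-period kthread or IPI handler asks a running
// reader to report a quiescent state, and that reader acknowledges on exit
// from its critical section while leaving the "already checked" bit set:
//
//	// Updater/IPI side:
//	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
//
//	// Reader side, in rcu_read_unlock_trace_special():
//	rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
//				TRC_NEED_QS_CHECKED);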
eea3423b PM |
1240 | /* |
1241 | * If we are the last reader, signal the grace-period kthread. | |
1242 | * Also remove from the per-CPU list of blocked tasks. | |
1243 | */ | |
a5c071cc | 1244 | void rcu_read_unlock_trace_special(struct task_struct *t) |
d5f177d3 | 1245 | { |
0bcb3868 PM |
1246 | unsigned long flags; |
1247 | struct rcu_tasks_percpu *rtpcp; | |
1248 | union rcu_special trs; | |
1249 | ||
1250 | // Open-coded full-word version of rcu_ld_need_qs(). | |
1251 | smp_mb(); // Enforce full grace-period ordering. | |
1252 | trs = smp_load_acquire(&t->trc_reader_special); | |
276c4104 | 1253 | |
3847b645 | 1254 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) |
276c4104 PM |
1255 | smp_mb(); // Pairs with update-side barriers. |
1256 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. | |
0bcb3868 | 1257 | if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { |
3847b645 PM |
1258 | u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, |
1259 | TRC_NEED_QS_CHECKED); | |
1260 | ||
0bcb3868 PM |
1261 | WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); |
1262 | } | |
1263 | if (trs.b.blocked) { | |
1264 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); | |
1265 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1266 | list_del_init(&t->trc_blkd_node); | |
1267 | WRITE_ONCE(t->trc_reader_special.b.blocked, false); | |
1268 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
3847b645 | 1269 | } |
a5c071cc | 1270 | WRITE_ONCE(t->trc_reader_nesting, 0); |
d5f177d3 PM |
1271 | } |
1272 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); | |
1273 | ||
0356d4e6 PM |
1274 | /* Add a newly blocked reader task to its CPU's list. */ |
1275 | void rcu_tasks_trace_qs_blkd(struct task_struct *t) | |
1276 | { | |
1277 | unsigned long flags; | |
1278 | struct rcu_tasks_percpu *rtpcp; | |
1279 | ||
1280 | local_irq_save(flags); | |
1281 | rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); | |
1282 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled | |
1283 | t->trc_blkd_cpu = smp_processor_id(); | |
1284 | if (!rtpcp->rtp_blkd_tasks.next) | |
1285 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); | |
1286 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); | |
0bcb3868 | 1287 | WRITE_ONCE(t->trc_reader_special.b.blocked, true); |
0356d4e6 PM |
1288 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1289 | } | |
1290 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); | |
1291 | ||
d5f177d3 PM |
1292 | /* Add a task to the holdout list, if it is not already on the list. */ |
1293 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) | |
1294 | { | |
1295 | if (list_empty(&t->trc_holdout_list)) { | |
1296 | get_task_struct(t); | |
1297 | list_add(&t->trc_holdout_list, bhp); | |
ffcc21a3 | 1298 | n_trc_holdouts++; |
d5f177d3 PM |
1299 | } |
1300 | } | |
1301 | ||
1302 | /* Remove a task from the holdout list, if it is in fact present. */ | |
1303 | static void trc_del_holdout(struct task_struct *t) | |
1304 | { | |
1305 | if (!list_empty(&t->trc_holdout_list)) { | |
1306 | list_del_init(&t->trc_holdout_list); | |
1307 | put_task_struct(t); | |
ffcc21a3 | 1308 | n_trc_holdouts--; |
d5f177d3 PM |
1309 | } |
1310 | } | |
1311 | ||
1312 | /* IPI handler to check task state. */ | |
1313 | static void trc_read_check_handler(void *t_in) | |
1314 | { | |
9ff86b4c | 1315 | int nesting; |
d5f177d3 PM |
1316 | struct task_struct *t = current; |
1317 | struct task_struct *texp = t_in; | |
1318 | ||
1319 | // If the task is no longer running on this CPU, leave. | |
3847b645 | 1320 | if (unlikely(texp != t)) |
d5f177d3 | 1321 | goto reset_ipi; // Already on holdout list, so will check later. |
d5f177d3 PM |
1322 | |
1323 | // If the task is not in a read-side critical section, mark it as |
1324 | // checked so that the grace-period kthread can make progress. |
9ff86b4c PM |
1325 | nesting = READ_ONCE(t->trc_reader_nesting); |
1326 | if (likely(!nesting)) { | |
3847b645 | 1327 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
d5f177d3 PM |
1328 | goto reset_ipi; |
1329 | } | |
ba3a86e4 | 1330 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
9ff86b4c | 1331 | if (unlikely(nesting < 0)) |
ba3a86e4 | 1332 | goto reset_ipi; |
d5f177d3 | 1333 | |
eea3423b PM |
1334 | // Get here if the task is in a read-side critical section. |
1335 | // Set its state so that it will update state for the grace-period | |
1336 | // kthread upon exit from that critical section. | |
55061126 | 1337 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); |
d5f177d3 PM |
1338 | |
1339 | reset_ipi: | |
1340 | // Allow future IPIs to be sent on CPU and for task. | |
1341 | // Also order this IPI handler against any later manipulations of | |
1342 | // the intended task. | |
8211e922 | 1343 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
d5f177d3 PM |
1344 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
1345 | } | |
1346 | ||
1347 | /* Callback function for scheduler to check locked-down task. */ | |
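/*
 * Returns 0 if a quiescent state was found or the reader was marked to
 * report one, or -EINVAL if the caller must retry later (for example,
 * by sending an IPI).
 */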
3847b645 | 1348 | static int trc_inspect_reader(struct task_struct *t, void *bhp_in) |
d5f177d3 | 1349 | { |
3847b645 | 1350 | struct list_head *bhp = bhp_in; |
7d0c9c50 | 1351 | int cpu = task_cpu(t); |
18f08e75 | 1352 | int nesting; |
7e3b70e0 | 1353 | bool ofl = cpu_is_offline(cpu); |
7d0c9c50 | 1354 | |
897ba84d | 1355 | if (task_curr(t) && !ofl) { |
7d0c9c50 | 1356 | // If no chance of heavyweight readers, do it the hard way. |
897ba84d | 1357 | if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
9b3c4ab3 | 1358 | return -EINVAL; |
7d0c9c50 PM |
1359 | |
1360 | // If heavyweight readers are enabled on the remote task, | |
1361 | // we can inspect its state even though it is currently running. |
1362 | // However, we cannot safely change its state. | |
40471509 | 1363 | n_heavy_reader_attempts++; |
897ba84d PM |
1364 | // Check for "running" idle tasks on offline CPUs. |
1365 | if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) | |
9b3c4ab3 | 1366 | return -EINVAL; // No quiescent state, do it the hard way. |
40471509 | 1367 | n_heavy_reader_updates++; |
18f08e75 | 1368 | nesting = 0; |
7d0c9c50 | 1369 | } else { |
bdb0cca0 | 1370 | // The task is not running, so C-language access is safe. |
18f08e75 | 1371 | nesting = t->trc_reader_nesting; |
897ba84d PM |
1372 | WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t)); |
1373 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) | |
1374 | n_heavy_reader_ofl_updates++; | |
7d0c9c50 | 1375 | } |
d5f177d3 | 1376 | |
18f08e75 PM |
1377 | // If the task is not in a read-side critical section, mark it as checked |
1378 | // so that the grace-period kthread will remove it from the | |
1379 | // holdout list. | |
0968e892 PM |
1380 | if (!nesting) { |
1381 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); | |
1382 | return 0; // In QS, so done. | |
3847b645 | 1383 | } |
0968e892 | 1384 | if (nesting < 0) |
eea3423b | 1385 | return -EINVAL; // Reader transitioning, try again later. |
7d0c9c50 PM |
1386 | |
1387 | // The task is in a read-side critical section, so set up its | |
0968e892 PM |
1388 | // state so that it will update state upon exit from that critical |
1389 | // section. | |
55061126 | 1390 | if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) |
3847b645 | 1391 | trc_add_holdout(t, bhp); |
9b3c4ab3 | 1392 | return 0; |
d5f177d3 PM |
1393 | } |
1394 | ||
1395 | /* Attempt to extract the state for the specified task. */ | |
1396 | static void trc_wait_for_one_reader(struct task_struct *t, | |
1397 | struct list_head *bhp) | |
1398 | { | |
1399 | int cpu; | |
1400 | ||
1401 | // If a previous IPI is still in flight, let it complete. | |
1402 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI | |
1403 | return; | |
1404 | ||
1405 | // The current task had better be in a quiescent state. | |
1406 | if (t == current) { | |
3847b645 | 1407 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
bdb0cca0 | 1408 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
d5f177d3 PM |
1409 | return; |
1410 | } | |
1411 | ||
1412 | // Attempt to nail down the task for inspection. | |
1413 | get_task_struct(t); | |
3847b645 | 1414 | if (!task_call_func(t, trc_inspect_reader, bhp)) { |
d5f177d3 PM |
1415 | put_task_struct(t); |
1416 | return; | |
1417 | } | |
1418 | put_task_struct(t); | |
1419 | ||
45f4b4a2 PM |
1420 | // If this task is not yet on the holdout list, then we are in |
1421 | // an RCU read-side critical section. Otherwise, the invocation of | |
d0a85858 | 1422 | // trc_add_holdout() that added it to the list did the necessary |
45f4b4a2 PM |
1423 | // get_task_struct(). Either way, the task cannot be freed out |
1424 | // from under this code. | |
1425 | ||
d5f177d3 PM |
1426 | // If the task is currently running, send an IPI; either way, add it to the list. |
1427 | trc_add_holdout(t, bhp); | |
574de876 PM |
1428 | if (task_curr(t) && |
1429 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { | |
d5f177d3 PM |
1430 | // The task is currently running, so try IPIing it. |
1431 | cpu = task_cpu(t); | |
1432 | ||
1433 | // If there is already an IPI outstanding, let it happen. | |
1434 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) | |
1435 | return; | |
1436 | ||
d5f177d3 PM |
1437 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
1438 | t->trc_ipi_to_cpu = cpu; | |
238dbce3 | 1439 | rcu_tasks_trace.n_ipis++; |
96017bf9 | 1440 | if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { |
d5f177d3 PM |
1441 | // Just in case there is some other reason for |
1442 | // failure than the target CPU being offline. | |
46aa886c NU |
1443 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", |
1444 | __func__, cpu); | |
7e0669c3 | 1445 | rcu_tasks_trace.n_ipis_fails++; |
d5f177d3 | 1446 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
46aa886c | 1447 | t->trc_ipi_to_cpu = -1; |
d5f177d3 PM |
1448 | } |
1449 | } | |
1450 | } | |
1451 | ||
7460ade1 PM |
1452 | /* |
1453 | * Initialize for first-round processing for the specified task. | |
1454 | * Return false if task is NULL or already taken care of, true otherwise. | |
1455 | */ | |
1456 | static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) | |
d5f177d3 | 1457 | { |
1b04fa99 | 1458 | // During early boot when there is only the one boot CPU, there |
19415004 PM |
1459 | // is no idle task for the other CPUs. Also, the grace-period |
1460 | // kthread is always in a quiescent state. In addition, just return | |
1461 | // if this task is already on the list. | |
7460ade1 PM |
1462 | if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) |
1463 | return false; | |
1b04fa99 | 1464 | |
3847b645 | 1465 | rcu_st_need_qs(t, 0); |
d5f177d3 | 1466 | t->trc_ipi_to_cpu = -1; |
7460ade1 PM |
1467 | return true; |
1468 | } | |
1469 | ||
1470 | /* Do first-round processing for the specified task. */ | |
1471 | static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) | |
1472 | { | |
1473 | if (rcu_tasks_trace_pertask_prep(t, true)) | |
1474 | trc_wait_for_one_reader(t, hop); | |
1475 | } | |
1476 | ||
1fa98e2e | 1477 | /* Initialize for a new RCU-tasks-trace grace period. */ |
7460ade1 | 1478 | static void rcu_tasks_trace_pregp_step(struct list_head *hop) |
1fa98e2e | 1479 | { |
dc7d54b4 | 1480 | LIST_HEAD(blkd_tasks); |
1fa98e2e | 1481 | int cpu; |
dc7d54b4 PM |
1482 | unsigned long flags; |
1483 | struct rcu_tasks_percpu *rtpcp; | |
1484 | struct task_struct *t; | |
1fa98e2e PM |
1485 | |
1486 | // There shouldn't be any old IPIs, but... | |
1487 | for_each_possible_cpu(cpu) | |
1488 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); | |
1489 | ||
eea3423b PM |
1490 | // Disable CPU hotplug across the CPU scan for the benefit of |
1491 | // any IPIs that might be needed. This also waits for all readers | |
1492 | // in CPU-hotplug code paths. | |
1fa98e2e | 1493 | cpus_read_lock(); |
7460ade1 | 1494 | |
eea3423b | 1495 | // These rcu_tasks_trace_pertask_prep() calls are serialized to |
7460ade1 | 1496 | // allow safe access to the hop list. |
e386b672 PM |
1497 | for_each_online_cpu(cpu) { |
1498 | rcu_read_lock(); | |
1499 | t = cpu_curr_snapshot(cpu); | |
1500 | if (rcu_tasks_trace_pertask_prep(t, true)) | |
1501 | trc_add_holdout(t, hop); | |
1502 | rcu_read_unlock(); | |
1503 | } | |
dc7d54b4 PM |
1504 | |
1505 | // Only after all running tasks have been accounted for is it | |
1506 | // safe to take care of the tasks that have blocked within their | |
1507 | // current RCU tasks trace read-side critical section. | |
1508 | for_each_possible_cpu(cpu) { | |
1509 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); | |
1510 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1511 | list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); | |
1512 | while (!list_empty(&blkd_tasks)) { | |
1513 | rcu_read_lock(); | |
1514 | t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); | |
1515 | list_del_init(&t->trc_blkd_node); | |
1516 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); | |
1517 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
1518 | rcu_tasks_trace_pertask(t, hop); | |
1519 | rcu_read_unlock(); | |
1520 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1521 | } | |
1522 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
1523 | } | |
56096ecd PM |
1524 | |
1525 | // Re-enable CPU hotplug now that the holdout list is populated. | |
1526 | cpus_read_unlock(); | |
1fa98e2e PM |
1527 | } |
1528 | ||
9796e1ae | 1529 | /* |
955a0192 | 1530 | * Do intermediate processing between task and holdout scans. |
9796e1ae PM |
1531 | */ |
1532 | static void rcu_tasks_trace_postscan(struct list_head *hop) | |
d5f177d3 PM |
1533 | { |
1534 | // Wait for late-stage exiting tasks to finish exiting. | |
1535 | // These might have passed the call to exit_tasks_rcu_finish(). | |
1536 | synchronize_rcu(); | |
3847b645 PM |
1537 | // Any tasks that exit after this point will set |
1538 | // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. | |
d5f177d3 PM |
1539 | } |
1540 | ||
65b629e7 NU |
1541 | /* Communicate task state back to the RCU tasks trace stall warning request. */ |
1542 | struct trc_stall_chk_rdr { | |
1543 | int nesting; | |
1544 | int ipi_to_cpu; | |
1545 | u8 needqs; | |
1546 | }; | |
1547 | ||
1548 | static int trc_check_slow_task(struct task_struct *t, void *arg) | |
1549 | { | |
1550 | struct trc_stall_chk_rdr *trc_rdrp = arg; | |
1551 | ||
f90f19da | 1552 | if (task_curr(t) && cpu_online(task_cpu(t))) |
65b629e7 NU |
1553 | return false; // It is running, so decline to inspect it. |
1554 | trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); | |
1555 | trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); | |
3847b645 | 1556 | trc_rdrp->needqs = rcu_ld_need_qs(t); |
65b629e7 NU |
1557 | return true; |
1558 | } | |
1559 | ||
4593e772 PM |
1560 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
1561 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) | |
1562 | { | |
1563 | int cpu; | |
65b629e7 NU |
1564 | struct trc_stall_chk_rdr trc_rdr; |
1565 | bool is_idle_tsk = is_idle_task(t); | |
4593e772 PM |
1566 | |
1567 | if (*firstreport) { | |
1568 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); | |
1569 | *firstreport = false; | |
1570 | } | |
4593e772 | 1571 | cpu = task_cpu(t); |
65b629e7 | 1572 | if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) |
9f3eb5fb | 1573 | pr_alert("P%d: %c%c\n", |
65b629e7 | 1574 | t->pid, |
9f3eb5fb | 1575 | ".I"[t->trc_ipi_to_cpu >= 0], |
65b629e7 NU |
1576 | ".i"[is_idle_tsk]); |
1577 | else | |
387c0ad7 | 1578 | pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", |
65b629e7 NU |
1579 | t->pid, |
1580 | ".I"[trc_rdr.ipi_to_cpu >= 0], | |
1581 | ".i"[is_idle_tsk], | |
1582 | ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], | |
387c0ad7 | 1583 | ".B"[!!data_race(t->trc_reader_special.b.blocked)], |
65b629e7 | 1584 | trc_rdr.nesting, |
be15a164 PM |
1585 | " !CN"[trc_rdr.needqs & 0x3], |
1586 | " ?"[trc_rdr.needqs > 0x3], | |
c8c03ad9 | 1587 | cpu, cpu_online(cpu) ? "" : "(offline)"); |
4593e772 PM |
1588 | sched_show_task(t); |
1589 | } | |
1590 | ||
1591 | /* List stalled IPIs for RCU tasks trace. */ | |
1592 | static void show_stalled_ipi_trace(void) | |
1593 | { | |
1594 | int cpu; | |
1595 | ||
1596 | for_each_possible_cpu(cpu) | |
1597 | if (per_cpu(trc_ipi_to_cpu, cpu)) | |
1598 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); | |
1599 | } | |
1600 | ||
d5f177d3 PM |
1601 | /* Do one scan of the holdout list. */ |
1602 | static void check_all_holdout_tasks_trace(struct list_head *hop, | |
4593e772 | 1603 | bool needreport, bool *firstreport) |
d5f177d3 PM |
1604 | { |
1605 | struct task_struct *g, *t; | |
1606 | ||
eea3423b | 1607 | // Disable CPU hotplug across the holdout list scan for IPIs. |
81b4a7bc PM |
1608 | cpus_read_lock(); |
1609 | ||
d5f177d3 PM |
1610 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
1611 | // If safe and needed, try to check the current task. | |
1612 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && | |
3847b645 | 1613 | !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) |
d5f177d3 PM |
1614 | trc_wait_for_one_reader(t, hop); |
1615 | ||
1616 | // If check succeeded, remove this task from the list. | |
f5dbc594 | 1617 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
3847b645 | 1618 | rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) |
d5f177d3 | 1619 | trc_del_holdout(t); |
4593e772 PM |
1620 | else if (needreport) |
1621 | show_stalled_task_trace(t, firstreport); | |
1622 | } | |
81b4a7bc PM |
1623 | |
1624 | // Re-enable CPU hotplug now that the holdout list scan has completed. | |
1625 | cpus_read_unlock(); | |
1626 | ||
4593e772 | 1627 | if (needreport) { |
89401176 | 1628 | if (*firstreport) |
4593e772 PM |
1629 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); |
1630 | show_stalled_ipi_trace(); | |
d5f177d3 PM |
1631 | } |
1632 | } | |
1633 | ||
cbe0d8d9 PM |
1634 | static void rcu_tasks_trace_empty_fn(void *unused) |
1635 | { | |
1636 | } | |
1637 | ||
d5f177d3 | 1638 | /* Wait for grace period to complete and provide ordering. */ |
af051ca4 | 1639 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
d5f177d3 | 1640 | { |
cbe0d8d9 | 1641 | int cpu; |
4593e772 | 1642 | |
cbe0d8d9 PM |
1643 | // Wait for any lingering IPI handlers to complete. Note that |
1644 | // if a CPU has gone offline or transitioned to userspace in the | |
1645 | // meantime, all IPI handlers should have been drained beforehand. | |
1646 | // Yes, this assumes that CPUs process IPIs in order. If that ever | |
1647 | // changes, there will need to be a recheck and/or timed wait. | |
1648 | for_each_online_cpu(cpu) | |
f5dbc594 | 1649 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
cbe0d8d9 PM |
1650 | smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); |
1651 | ||
d5f177d3 | 1652 | smp_mb(); // Caller's code must be ordered after wakeup. |
43766c3e | 1653 | // Pairs with pretty much every ordering primitive. |
d5f177d3 PM |
1654 | } |
1655 | ||
1656 | /* Report any needed quiescent state for this exiting task. */ | |
25246fc8 | 1657 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
d5f177d3 | 1658 | { |
0356d4e6 PM |
1659 | union rcu_special trs = READ_ONCE(t->trc_reader_special); |
1660 | ||
3847b645 | 1661 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
bdb0cca0 | 1662 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
0bcb3868 | 1663 | if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) |
a5c071cc | 1664 | rcu_read_unlock_trace_special(t); |
3847b645 PM |
1665 | else |
1666 | WRITE_ONCE(t->trc_reader_nesting, 0); | |
d5f177d3 PM |
1667 | } |
1668 | ||
d5f177d3 PM |
1669 | /** |
1670 | * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period |
1671 | * @rhp: structure to be used for queueing the RCU updates. | |
1672 | * @func: actual callback function to be invoked after the grace period | |
1673 | * | |
ed42c380 NU |
1674 | * The callback function will be invoked some time after a trace rcu-tasks |
1675 | * grace period elapses, in other words after all currently executing | |
1676 | * trace rcu-tasks read-side critical sections have completed. These | |
1677 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() | |
1678 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1679 | * |
1680 | * See the description of call_rcu() for more detailed information on | |
1681 | * memory ordering guarantees. | |
1682 | */ | |
1683 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) | |
1684 | { | |
1685 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); | |
1686 | } | |
1687 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); | |
1688 | ||
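// A minimal call_rcu_tasks_trace() usage sketch (struct my_hook and
// my_hook_reclaim() are illustrative names, not part of this file): embed
// an rcu_head in the protected structure and free it from the callback
// once a grace period has elapsed:
//
//	struct my_hook {
//		struct rcu_head rh;
//		void (*func)(void *arg);
//	};
//
//	static void my_hook_reclaim(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_hook, rh));
//	}
//
//	// After making the old hook unreachable to new readers:
//	call_rcu_tasks_trace(&old_hook->rh, my_hook_reclaim);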
1689 | /** | |
1690 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period | |
1691 | * | |
1692 | * Control will return to the caller some time after a trace rcu-tasks | |
c7dcf810 | 1693 | * grace period has elapsed, in other words after all currently executing |
ed42c380 | 1694 | * trace rcu-tasks read-side critical sections have completed. These read-side |
c7dcf810 PM |
1695 | * critical sections are delimited by calls to rcu_read_lock_trace() |
1696 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1697 | * |
1698 | * This is a very specialized primitive, intended only for a few uses in | |
1699 | * tracing and other situations requiring manipulation of function preambles | |
1700 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not | |
1701 | * (yet) intended for heavy use from multiple CPUs. | |
1702 | * | |
1703 | * See the description of synchronize_rcu() for more detailed information | |
1704 | * on memory ordering guarantees. | |
1705 | */ | |
1706 | void synchronize_rcu_tasks_trace(void) | |
1707 | { | |
1708 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); | |
1709 | synchronize_rcu_tasks_generic(&rcu_tasks_trace); | |
1710 | } | |
1711 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); | |
1712 | ||
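// A sketch of the blocking update pattern (my_trace_hook, my_lock, and old
// are illustrative names): unpublish the pointer, wait for all pre-existing
// trace readers, and only then free the old structure:
//
//	old = rcu_dereference_protected(my_trace_hook, lockdep_is_held(&my_lock));
//	rcu_assign_pointer(my_trace_hook, NULL);
//	synchronize_rcu_tasks_trace();
//	kfree(old);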
1713 | /** | |
1714 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. | |
1715 | * | |
1716 | * Although the current implementation is guaranteed to wait, it is not | |
1717 | * obligated to do so, for example, if there are no pending callbacks. |
1718 | */ | |
1719 | void rcu_barrier_tasks_trace(void) | |
1720 | { | |
ce9b1c66 | 1721 | rcu_barrier_tasks_generic(&rcu_tasks_trace); |
d5f177d3 PM |
1722 | } |
1723 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); | |
1724 | ||
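// For example (sketch only, unregister_my_hooks() being an illustrative
// name), a module that queues callbacks via call_rcu_tasks_trace() would
// typically drain them before allowing itself to be unloaded:
//
//	static void __exit my_module_exit(void)
//	{
//		unregister_my_hooks();		// Stop queueing new callbacks.
//		rcu_barrier_tasks_trace();	// Wait for queued callbacks to be invoked.
//	}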
1725 | static int __init rcu_spawn_tasks_trace_kthread(void) | |
1726 | { | |
cafafd67 | 1727 | cblist_init_generic(&rcu_tasks_trace); |
2393a613 | 1728 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
4fe192df | 1729 | rcu_tasks_trace.gp_sleep = HZ / 10; |
75dc2da5 | 1730 | rcu_tasks_trace.init_fract = HZ / 10; |
2393a613 | 1731 | } else { |
4fe192df PM |
1732 | rcu_tasks_trace.gp_sleep = HZ / 200; |
1733 | if (rcu_tasks_trace.gp_sleep <= 0) | |
1734 | rcu_tasks_trace.gp_sleep = 1; | |
75dc2da5 | 1735 | rcu_tasks_trace.init_fract = HZ / 200; |
2393a613 PM |
1736 | if (rcu_tasks_trace.init_fract <= 0) |
1737 | rcu_tasks_trace.init_fract = 1; | |
1738 | } | |
d5f177d3 | 1739 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
d5f177d3 PM |
1740 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; |
1741 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; | |
1742 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; | |
1743 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); | |
1744 | return 0; | |
1745 | } | |
d5f177d3 | 1746 | |
27c0f144 PM |
1747 | #if !defined(CONFIG_TINY_RCU) |
1748 | void show_rcu_tasks_trace_gp_kthread(void) | |
e21408ce | 1749 | { |
40471509 | 1750 | char buf[64]; |
e21408ce | 1751 | |
ffcc21a3 PM |
1752 | sprintf(buf, "N%lu h:%lu/%lu/%lu", |
1753 | data_race(n_trc_holdouts), | |
edf3775f | 1754 | data_race(n_heavy_reader_ofl_updates), |
40471509 PM |
1755 | data_race(n_heavy_reader_updates), |
1756 | data_race(n_heavy_reader_attempts)); | |
e21408ce PM |
1757 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); |
1758 | } | |
27c0f144 PM |
1759 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
1760 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 1761 | |
d5f177d3 | 1762 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
25246fc8 | 1763 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
d5f177d3 | 1764 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
8fd8ca38 | 1765 | |
8344496e | 1766 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
1767 | void show_rcu_tasks_gp_kthreads(void) |
1768 | { | |
1769 | show_rcu_tasks_classic_gp_kthread(); | |
1770 | show_rcu_tasks_rude_gp_kthread(); | |
1771 | show_rcu_tasks_trace_gp_kthread(); | |
1772 | } | |
8344496e | 1773 | #endif /* #ifndef CONFIG_TINY_RCU */ |
e21408ce | 1774 | |
bfba7ed0 URS |
1775 | #ifdef CONFIG_PROVE_RCU |
1776 | struct rcu_tasks_test_desc { | |
1777 | struct rcu_head rh; | |
1778 | const char *name; | |
1779 | bool notrun; | |
1cf1144e | 1780 | unsigned long runstart; |
bfba7ed0 URS |
1781 | }; |
1782 | ||
1783 | static struct rcu_tasks_test_desc tests[] = { | |
1784 | { | |
1785 | .name = "call_rcu_tasks()", | |
1786 | /* If not defined, the test is skipped. */ | |
1cf1144e | 1787 | .notrun = IS_ENABLED(CONFIG_TASKS_RCU), |
bfba7ed0 URS |
1788 | }, |
1789 | { | |
1790 | .name = "call_rcu_tasks_rude()", | |
1791 | /* If not defined, the test is skipped. */ | |
1cf1144e | 1792 | .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU), |
bfba7ed0 URS |
1793 | }, |
1794 | { | |
1795 | .name = "call_rcu_tasks_trace()", | |
1796 | /* If not defined, the test is skipped. */ | |
1cf1144e | 1797 | .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) |
bfba7ed0 URS |
1798 | } |
1799 | }; | |
1800 | ||
1801 | static void test_rcu_tasks_callback(struct rcu_head *rhp) | |
1802 | { | |
1803 | struct rcu_tasks_test_desc *rttd = | |
1804 | container_of(rhp, struct rcu_tasks_test_desc, rh); | |
1805 | ||
1806 | pr_info("Callback from %s invoked.\n", rttd->name); | |
1807 | ||
1cf1144e | 1808 | rttd->notrun = false; |
bfba7ed0 URS |
1809 | } |
1810 | ||
1811 | static void rcu_tasks_initiate_self_tests(void) | |
1812 | { | |
1cf1144e PM |
1813 | unsigned long j = jiffies; |
1814 | ||
bfba7ed0 URS |
1815 | pr_info("Running RCU-tasks wait API self tests\n"); |
1816 | #ifdef CONFIG_TASKS_RCU | |
1cf1144e | 1817 | tests[0].runstart = j; |
bfba7ed0 URS |
1818 | synchronize_rcu_tasks(); |
1819 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); | |
1820 | #endif | |
1821 | ||
1822 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1cf1144e | 1823 | tests[1].runstart = j; |
bfba7ed0 URS |
1824 | synchronize_rcu_tasks_rude(); |
1825 | call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); | |
1826 | #endif | |
1827 | ||
1828 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1cf1144e | 1829 | tests[2].runstart = j; |
bfba7ed0 URS |
1830 | synchronize_rcu_tasks_trace(); |
1831 | call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); | |
1832 | #endif | |
1833 | } | |
1834 | ||
e72ee5e1 WL |
1835 | /* |
1836 | * Return: 0 - test passed | |
1837 | * 1 - test failed, but have not timed out yet | |
1838 | * -1 - test failed and timed out | |
1839 | */ | |
bfba7ed0 URS |
1840 | static int rcu_tasks_verify_self_tests(void) |
1841 | { | |
1842 | int ret = 0; | |
1843 | int i; | |
1cf1144e | 1844 | unsigned long bst = rcu_task_stall_timeout; |
bfba7ed0 | 1845 | |
1cf1144e PM |
1846 | if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) |
1847 | bst = RCU_TASK_BOOT_STALL_TIMEOUT; | |
bfba7ed0 | 1848 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
1cf1144e PM |
1849 | while (tests[i].notrun) { // still hanging. |
1850 | if (time_after(jiffies, tests[i].runstart + bst)) { | |
1851 | pr_err("%s has failed boot-time tests.\n", tests[i].name); | |
1852 | ret = -1; | |
1853 | break; | |
1854 | } | |
e72ee5e1 WL |
1855 | ret = 1; |
1856 | break; | |
bfba7ed0 URS |
1857 | } |
1858 | } | |
e72ee5e1 | 1859 | WARN_ON(ret < 0); |
bfba7ed0 URS |
1860 | |
1861 | return ret; | |
1862 | } | |
e72ee5e1 WL |
1863 | |
1864 | /* | |
1865 | * Repeat the rcu_tasks_verify_self_tests() call once every second until the | |
1866 | * test passes or has timed out. | |
1867 | */ | |
1868 | static struct delayed_work rcu_tasks_verify_work; | |
1869 | static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) | |
1870 | { | |
1871 | int ret = rcu_tasks_verify_self_tests(); | |
1872 | ||
1873 | if (ret <= 0) | |
1874 | return; | |
1875 | ||
1876 | /* Test fails but not timed out yet, reschedule another check */ | |
1877 | schedule_delayed_work(&rcu_tasks_verify_work, HZ); | |
1878 | } | |
1879 | ||
1880 | static int rcu_tasks_verify_schedule_work(void) | |
1881 | { | |
1882 | INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); | |
1883 | rcu_tasks_verify_work_fn(NULL); | |
1884 | return 0; | |
1885 | } | |
1886 | late_initcall(rcu_tasks_verify_schedule_work); | |
bfba7ed0 URS |
1887 | #else /* #ifdef CONFIG_PROVE_RCU */ |
1888 | static void rcu_tasks_initiate_self_tests(void) { } | |
1889 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | |
1890 | ||
1b04fa99 URS |
1891 | void __init rcu_init_tasks_generic(void) |
1892 | { | |
1893 | #ifdef CONFIG_TASKS_RCU | |
1894 | rcu_spawn_tasks_kthread(); | |
1895 | #endif | |
1896 | ||
1897 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1898 | rcu_spawn_tasks_rude_kthread(); | |
1899 | #endif | |
1900 | ||
1901 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1902 | rcu_spawn_tasks_trace_kthread(); | |
1903 | #endif | |
bfba7ed0 URS |
1904 | |
1905 | // Run the self-tests. | |
1906 | rcu_tasks_initiate_self_tests(); | |
1b04fa99 URS |
1907 | } |
1908 | ||
8fd8ca38 PM |
1909 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
1910 | static inline void rcu_tasks_bootup_oddness(void) {} | |
1911 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |