Commit | Line | Data |
---|---|---|
eacd6f04 PM |
1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* | |
3 | * Task-based RCU implementations. | |
4 | * | |
5 | * Copyright (C) 2020 Paul E. McKenney | |
6 | */ | |
7 | ||
8fd8ca38 | 8 | #ifdef CONFIG_TASKS_RCU_GENERIC |
9b073de1 | 9 | #include "rcu_segcblist.h" |
5873b8a9 PM |
10 | |
11 | //////////////////////////////////////////////////////////////////////// | |
12 | // | |
13 | // Generic data structures. | |
14 | ||
15 | struct rcu_tasks; | |
16 | typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); | |
7460ade1 | 17 | typedef void (*pregp_func_t)(struct list_head *hop); |
e4fe5dd6 | 18 | typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); |
9796e1ae | 19 | typedef void (*postscan_func_t)(struct list_head *hop); |
e4fe5dd6 | 20 | typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); |
af051ca4 | 21 | typedef void (*postgp_func_t)(struct rcu_tasks *rtp); |
eacd6f04 | 22 | |
07e10515 | 23 | /** |
cafafd67 | 24 | * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. |
9b073de1 | 25 | * @cblist: Callback list. |
381a4f3b | 26 | * @lock: Lock protecting per-CPU callback list. |
7d13d30b | 27 | * @rtp_jiffies: Jiffies counter value for statistics. |
d119357d PM |
28 | * @lazy_timer: Timer to unlazify callbacks. |
29 | * @urgent_gp: Number of additional non-lazy grace periods. | |
7d13d30b | 30 | * @rtp_n_lock_retries: Rough lock-contention statistic. |
d363f833 | 31 | * @rtp_work: Work queue for invoking callbacks. |
3063b33a | 32 | * @rtp_irq_work: IRQ work queue for deferred wakeups. |
ce9b1c66 | 33 | * @barrier_q_head: RCU callback for barrier operation. |
434c9eef | 34 | * @rtp_blkd_tasks: List of tasks blocked as readers. |
bfe93930 | 35 | * @rtp_exit_list: List of tasks in the latter portion of do_exit(). |
ce9b1c66 PM |
36 | * @cpu: CPU number corresponding to this entry. |
37 | * @rtpp: Pointer to the rcu_tasks structure. | |
cafafd67 PM |
38 | */ |
39 | struct rcu_tasks_percpu { | |
9b073de1 | 40 | struct rcu_segcblist cblist; |
381a4f3b | 41 | raw_spinlock_t __private lock; |
7d13d30b PM |
42 | unsigned long rtp_jiffies; |
43 | unsigned long rtp_n_lock_retries; | |
d119357d PM |
44 | struct timer_list lazy_timer; |
45 | unsigned int urgent_gp; | |
d363f833 | 46 | struct work_struct rtp_work; |
3063b33a | 47 | struct irq_work rtp_irq_work; |
ce9b1c66 | 48 | struct rcu_head barrier_q_head; |
434c9eef | 49 | struct list_head rtp_blkd_tasks; |
bfe93930 | 50 | struct list_head rtp_exit_list; |
d363f833 PM |
51 | int cpu; |
52 | struct rcu_tasks *rtpp; | |
cafafd67 PM |
53 | }; |
54 | ||
55 | /** | |
56 | * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. | |
88db792b | 57 | * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. |
cafafd67 | 58 | * @cbs_gbl_lock: Lock protecting callback list. |
d96225fd | 59 | * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. |
5873b8a9 | 60 | * @gp_func: This flavor's grace-period-wait function. |
af051ca4 | 61 | * @gp_state: Grace period's most recent state transition (debugging). |
4fe192df | 62 | * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. |
2393a613 | 63 | * @init_fract: Initial backoff sleep interval. |
af051ca4 PM |
64 | * @gp_jiffies: Time of last @gp_state transition. |
65 | * @gp_start: Most recent grace-period start in jiffies. | |
b14fb4fb | 66 | * @tasks_gp_seq: Number of grace periods completed since boot. |
238dbce3 | 67 | * @n_ipis: Number of IPIs sent to encourage grace periods to end. |
7e0669c3 | 68 | * @n_ipis_fails: Number of IPI-send failures. |
d119357d PM |
69 | * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. |
70 | * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy. | |
e4fe5dd6 PM |
71 | * @pregp_func: This flavor's pre-grace-period function (optional). |
72 | * @pertask_func: This flavor's per-task scan function (optional). | |
73 | * @postscan_func: This flavor's post-task scan function (optional). | |
85b86994 | 74 | * @holdouts_func: This flavor's holdout-list scan function (optional). |
e4fe5dd6 | 75 | * @postgp_func: This flavor's post-grace-period function (optional). |
5873b8a9 | 76 | * @call_func: This flavor's call_rcu()-equivalent function. |
c342b42f | 77 | * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE). |
cafafd67 | 78 | * @rtpcpu: This flavor's rcu_tasks_percpu structure. |
7a30871b | 79 | * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. |
2cee0789 PM |
80 | * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. |
81 | * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. | |
fd796e41 | 82 | * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. |
ce9b1c66 PM |
83 | * @barrier_q_mutex: Serialize barrier operations. |
84 | * @barrier_q_count: Number of queues being waited on. | |
85 | * @barrier_q_completion: Barrier wait/wakeup mechanism. | |
86 | * @barrier_q_seq: Sequence number for barrier operations. | |
c97d12a6 PM |
87 | * @name: This flavor's textual name. |
88 | * @kname: This flavor's kthread name. | |
07e10515 PM |
89 | */ |
90 | struct rcu_tasks { | |
88db792b | 91 | struct rcuwait cbs_wait; |
cafafd67 | 92 | raw_spinlock_t cbs_gbl_lock; |
d96225fd | 93 | struct mutex tasks_gp_mutex; |
af051ca4 | 94 | int gp_state; |
4fe192df | 95 | int gp_sleep; |
2393a613 | 96 | int init_fract; |
af051ca4 | 97 | unsigned long gp_jiffies; |
88092d0c | 98 | unsigned long gp_start; |
b14fb4fb | 99 | unsigned long tasks_gp_seq; |
238dbce3 | 100 | unsigned long n_ipis; |
7e0669c3 | 101 | unsigned long n_ipis_fails; |
07e10515 | 102 | struct task_struct *kthread_ptr; |
d119357d | 103 | unsigned long lazy_jiffies; |
5873b8a9 | 104 | rcu_tasks_gp_func_t gp_func; |
e4fe5dd6 PM |
105 | pregp_func_t pregp_func; |
106 | pertask_func_t pertask_func; | |
107 | postscan_func_t postscan_func; | |
108 | holdouts_func_t holdouts_func; | |
109 | postgp_func_t postgp_func; | |
5873b8a9 | 110 | call_rcu_func_t call_func; |
c342b42f | 111 | unsigned int wait_state; |
cafafd67 | 112 | struct rcu_tasks_percpu __percpu *rtpcpu; |
7a30871b | 113 | int percpu_enqueue_shift; |
8dd593fd | 114 | int percpu_enqueue_lim; |
2cee0789 | 115 | int percpu_dequeue_lim; |
fd796e41 | 116 | unsigned long percpu_dequeue_gpseq; |
ce9b1c66 PM |
117 | struct mutex barrier_q_mutex; |
118 | atomic_t barrier_q_count; | |
119 | struct completion barrier_q_completion; | |
120 | unsigned long barrier_q_seq; | |
c97d12a6 PM |
121 | char *name; |
122 | char *kname; | |
07e10515 PM |
123 | }; |
124 | ||
3063b33a PM |
125 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); |
126 | ||
cafafd67 PM |
127 | #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ |
128 | static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ | |
381a4f3b | 129 | .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ |
88db792b | 130 | .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ |
cafafd67 PM |
131 | }; \ |
132 | static struct rcu_tasks rt_name = \ | |
133 | { \ | |
88db792b | 134 | .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ |
cafafd67 | 135 | .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ |
d96225fd | 136 | .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ |
cafafd67 PM |
137 | .gp_func = gp, \ |
138 | .call_func = call, \ | |
c342b42f | 139 | .wait_state = TASK_UNINTERRUPTIBLE, \ |
cafafd67 | 140 | .rtpcpu = &rt_name ## __percpu, \ |
d119357d | 141 | .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ |
cafafd67 | 142 | .name = n, \ |
2bcd18e0 | 143 | .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ |
8dd593fd | 144 | .percpu_enqueue_lim = 1, \ |
2cee0789 | 145 | .percpu_dequeue_lim = 1, \ |
ce9b1c66 PM |
146 | .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ |
147 | .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ | |
cafafd67 | 148 | .kname = #rt_name, \ |
07e10515 PM |
149 | } |
150 | ||
2b4be548 | 151 | #ifdef CONFIG_TASKS_RCU |
eacd6f04 | 152 | |
5f48fa85 | 153 | /* Report delays in rcu_tasks_postscan()'s scan of exiting tasks. */
a4533cc0 NU |
154 | static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); |
155 | static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); | |
156 | #endif | |
157 | ||
b0afa0f0 | 158 | /* Avoid IPIing CPUs early in the grace period. */ |
574de876 | 159 | #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) |
b0afa0f0 PM |
160 | static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; |
161 | module_param(rcu_task_ipi_delay, int, 0644); | |
162 | ||
eacd6f04 | 163 | /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ |
1cf1144e | 164 | #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) |
eacd6f04 PM |
165 | #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) |
166 | static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; | |
167 | module_param(rcu_task_stall_timeout, int, 0644); | |
f2539003 PM |
168 | #define RCU_TASK_STALL_INFO (HZ * 10) |
169 | static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO; | |
170 | module_param(rcu_task_stall_info, int, 0644); | |
171 | static int rcu_task_stall_info_mult __read_mostly = 3; | |
172 | module_param(rcu_task_stall_info_mult, int, 0444); | |
eacd6f04 | 173 | |
8610b656 PM |
174 | static int rcu_task_enqueue_lim __read_mostly = -1; |
175 | module_param(rcu_task_enqueue_lim, int, 0444); | |
176 | ||
ab97152f PM |
177 | static bool rcu_task_cb_adjust; |
178 | static int rcu_task_contend_lim __read_mostly = 100; | |
179 | module_param(rcu_task_contend_lim, int, 0444); | |
fd796e41 PM |
180 | static int rcu_task_collapse_lim __read_mostly = 10; |
181 | module_param(rcu_task_collapse_lim, int, 0444); | |
db13710a PM |
182 | static int rcu_task_lazy_lim __read_mostly = 32; |
183 | module_param(rcu_task_lazy_lim, int, 0444); | |
ab97152f | 184 | |
af051ca4 PM |
185 | /* RCU tasks grace-period state for debugging. */ |
186 | #define RTGS_INIT 0 | |
187 | #define RTGS_WAIT_WAIT_CBS 1 | |
188 | #define RTGS_WAIT_GP 2 | |
189 | #define RTGS_PRE_WAIT_GP 3 | |
190 | #define RTGS_SCAN_TASKLIST 4 | |
191 | #define RTGS_POST_SCAN_TASKLIST 5 | |
192 | #define RTGS_WAIT_SCAN_HOLDOUTS 6 | |
193 | #define RTGS_SCAN_HOLDOUTS 7 | |
194 | #define RTGS_POST_GP 8 | |
195 | #define RTGS_WAIT_READERS 9 | |
196 | #define RTGS_INVOKE_CBS 10 | |
197 | #define RTGS_WAIT_CBS 11 | |
8344496e | 198 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
199 | static const char * const rcu_tasks_gp_state_names[] = { |
200 | "RTGS_INIT", | |
201 | "RTGS_WAIT_WAIT_CBS", | |
202 | "RTGS_WAIT_GP", | |
203 | "RTGS_PRE_WAIT_GP", | |
204 | "RTGS_SCAN_TASKLIST", | |
205 | "RTGS_POST_SCAN_TASKLIST", | |
206 | "RTGS_WAIT_SCAN_HOLDOUTS", | |
207 | "RTGS_SCAN_HOLDOUTS", | |
208 | "RTGS_POST_GP", | |
209 | "RTGS_WAIT_READERS", | |
210 | "RTGS_INVOKE_CBS", | |
211 | "RTGS_WAIT_CBS", | |
212 | }; | |
8344496e | 213 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 214 | |
5873b8a9 PM |
215 | //////////////////////////////////////////////////////////////////////// |
216 | // | |
217 | // Generic code. | |
218 | ||
d363f833 PM |
219 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); |
220 | ||
af051ca4 PM |
221 | /* Record grace-period phase and time. */ |
222 | static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) | |
223 | { | |
224 | rtp->gp_state = newstate; | |
225 | rtp->gp_jiffies = jiffies; | |
226 | } | |
227 | ||
8344496e | 228 | #ifndef CONFIG_TINY_RCU |
af051ca4 PM |
229 | /* Return state name. */ |
230 | static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) | |
231 | { | |
232 | int i = data_race(rtp->gp_state); // Let KCSAN detect update races | |
233 | int j = READ_ONCE(i); // Prevent the compiler from reading twice | |
234 | ||
235 | if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) | |
236 | return "???"; | |
237 | return rcu_tasks_gp_state_names[j]; | |
238 | } | |
8344496e | 239 | #endif /* #ifndef CONFIG_TINY_RCU */ |
af051ca4 | 240 | |
cafafd67 | 241 | // Initialize per-CPU callback lists for the specified flavor of |
cb88f7f5 | 242 | // Tasks RCU. Do not enqueue callbacks before this function is invoked. |
cafafd67 PM |
243 | static void cblist_init_generic(struct rcu_tasks *rtp) |
244 | { | |
245 | int cpu; | |
8610b656 | 246 | int lim; |
da123016 | 247 | int shift; |
cafafd67 | 248 | |
ab97152f PM |
249 | if (rcu_task_enqueue_lim < 0) { |
250 | rcu_task_enqueue_lim = 1; | |
251 | rcu_task_cb_adjust = true; | |
ab97152f | 252 | } else if (rcu_task_enqueue_lim == 0) { |
8610b656 | 253 | rcu_task_enqueue_lim = 1; |
ab97152f | 254 | } |
8610b656 PM |
255 | lim = rcu_task_enqueue_lim; |
256 | ||
257 | if (lim > nr_cpu_ids) | |
258 | lim = nr_cpu_ids; | |
da123016 PM |
259 | shift = ilog2(nr_cpu_ids / lim); |
260 | if (((nr_cpu_ids - 1) >> shift) >= lim) | |
261 | shift++; | |
262 | WRITE_ONCE(rtp->percpu_enqueue_shift, shift); | |
2cee0789 | 263 | WRITE_ONCE(rtp->percpu_dequeue_lim, lim); |
8610b656 | 264 | smp_store_release(&rtp->percpu_enqueue_lim, lim); |
cafafd67 PM |
265 | for_each_possible_cpu(cpu) { |
266 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
267 | ||
268 | WARN_ON_ONCE(!rtpcp); | |
269 | if (cpu) | |
381a4f3b | 270 | raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); |
9b073de1 PM |
271 | if (rcu_segcblist_empty(&rtpcp->cblist)) |
272 | rcu_segcblist_init(&rtpcp->cblist); | |
d363f833 PM |
273 | INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); |
274 | rtpcp->cpu = cpu; | |
275 | rtpcp->rtpp = rtp; | |
434c9eef PM |
276 | if (!rtpcp->rtp_blkd_tasks.next) |
277 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); | |
46faf9d8 PM |
278 | if (!rtpcp->rtp_exit_list.next) |
279 | INIT_LIST_HEAD(&rtpcp->rtp_exit_list); | |
cafafd67 | 280 | } |
5fc8cbe4 | 281 | |
edff5e9a Z |
282 | pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, |
283 | data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); | |
cafafd67 PM |
284 | } |
285 | ||
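
The shift computed above guarantees that any CPU number, right-shifted by `percpu_enqueue_shift`, indexes a callback queue below `lim`. A minimal user-space sketch of that arithmetic, with made-up CPU counts (`ilog2_approx()` is a local stand-in for the kernel's `ilog2()`):

```c
#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(v)) for v >= 1. */
static int ilog2_approx(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the shift computation in cblist_init_generic(). */
static int enqueue_shift(int nr_cpu_ids, int lim)
{
	int shift = ilog2_approx(nr_cpu_ids / lim);

	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
	return shift;
}

int main(void)
{
	int nr_cpu_ids = 8, lim = 3;	/* illustrative values only */
	int shift = enqueue_shift(nr_cpu_ids, lim);

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++)
		printf("CPU %d -> queue %d (always < lim = %d)\n",
		       cpu, cpu >> shift, lim);
	return 0;
}
```

Rounding the shift up this way can leave some of the `lim` queues unused, but it ensures that no CPU ever indexes a queue at or beyond `lim`.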
d119357d PM |
286 | // Compute wakeup time for lazy callback timer. |
287 | static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) | |
288 | { | |
289 | return jiffies + rtp->lazy_jiffies; | |
290 | } | |
291 | ||
292 | // Timer handler that unlazifies lazy callbacks. | |
293 | static void call_rcu_tasks_generic_timer(struct timer_list *tlp) | |
294 | { | |
295 | unsigned long flags; | |
296 | bool needwake = false; | |
297 | struct rcu_tasks *rtp; | |
298 | struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer); | |
299 | ||
300 | rtp = rtpcp->rtpp; | |
301 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
302 | if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { | |
303 | if (!rtpcp->urgent_gp) | |
304 | rtpcp->urgent_gp = 1; | |
305 | needwake = true; | |
306 | mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); | |
307 | } | |
308 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
309 | if (needwake) | |
310 | rcuwait_wake_up(&rtp->cbs_wait); | |
311 | } | |
312 | ||
3063b33a PM |
313 | // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). |
314 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) | |
315 | { | |
316 | struct rcu_tasks *rtp; | |
317 | struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); | |
318 | ||
319 | rtp = rtpcp->rtpp; | |
88db792b | 320 | rcuwait_wake_up(&rtp->cbs_wait); |
3063b33a PM |
321 | } |
322 | ||
5873b8a9 PM |
323 | // Enqueue a callback for the specified flavor of Tasks RCU. |
324 | static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, | |
325 | struct rcu_tasks *rtp) | |
eacd6f04 | 326 | { |
07d95c34 | 327 | int chosen_cpu; |
eacd6f04 | 328 | unsigned long flags; |
d119357d | 329 | bool havekthread = smp_load_acquire(&rtp->kthread_ptr); |
07d95c34 | 330 | int ideal_cpu; |
7d13d30b | 331 | unsigned long j; |
ab97152f | 332 | bool needadjust = false; |
eacd6f04 | 333 | bool needwake; |
cafafd67 | 334 | struct rcu_tasks_percpu *rtpcp; |
eacd6f04 PM |
335 | |
336 | rhp->next = NULL; | |
337 | rhp->func = func; | |
cafafd67 | 338 | local_irq_save(flags); |
fd796e41 | 339 | rcu_read_lock(); |
07d95c34 ED |
340 | ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); |
341 | chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); | |
342 | rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); | |
7d13d30b PM |
343 | if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. |
344 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. | |
345 | j = jiffies; | |
346 | if (rtpcp->rtp_jiffies != j) { | |
347 | rtpcp->rtp_jiffies = j; | |
348 | rtpcp->rtp_n_lock_retries = 0; | |
349 | } | |
ab97152f PM |
350 | if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && |
351 | READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) | |
352 | needadjust = true; // Defer adjustment to avoid deadlock. | |
7d13d30b | 353 | } |
cb88f7f5 PM |
354 | // Queuing callbacks before initialization not yet supported. |
355 | if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) | |
356 | rcu_segcblist_init(&rtpcp->cblist); | |
db13710a PM |
357 | needwake = (func == wakeme_after_rcu) || |
358 | (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); | |
359 | if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { | |
d119357d PM |
360 | if (rtp->lazy_jiffies) |
361 | mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); | |
362 | else | |
363 | needwake = rcu_segcblist_empty(&rtpcp->cblist); | |
cafafd67 | 364 | } |
d119357d PM |
365 | if (needwake) |
366 | rtpcp->urgent_gp = 3; | |
9b073de1 | 367 | rcu_segcblist_enqueue(&rtpcp->cblist, rhp); |
381a4f3b | 368 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
ab97152f PM |
369 | if (unlikely(needadjust)) { |
370 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
371 | if (rtp->percpu_enqueue_lim != nr_cpu_ids) { | |
00a8b4b5 | 372 | WRITE_ONCE(rtp->percpu_enqueue_shift, 0); |
fd796e41 | 373 | WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids); |
ab97152f PM |
374 | smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids); |
375 | pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); | |
376 | } | |
377 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
378 | } | |
fd796e41 | 379 | rcu_read_unlock(); |
eacd6f04 | 380 | /* We can't create the thread unless interrupts are enabled. */ |
07e10515 | 381 | if (needwake && READ_ONCE(rtp->kthread_ptr)) |
3063b33a | 382 | irq_work_queue(&rtpcp->rtp_irq_work); |
eacd6f04 | 383 | } |
eacd6f04 | 384 | |
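
In the enqueue path above, the queue is picked by shifting the caller's CPU number and then taking the first possible CPU at or after that index, since `cpumask_next(ideal_cpu - 1, cpu_possible_mask)` returns the first possible CPU greater than `ideal_cpu - 1`. A rough user-space model of that selection, using an invented sparse possible-CPU mask purely for illustration:

```c
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

/* Models cpumask_next(start - 1, mask): first possible CPU >= start, or -1. */
static int next_possible(const bool *possible, int start)
{
	for (int cpu = start; cpu < NR_CPUS; cpu++)
		if (possible[cpu])
			return cpu;
	return -1;
}

int main(void)
{
	/* Invented sparse mask: CPUs 1 and 3 are not possible. */
	bool possible[NR_CPUS] = { true, false, true, false, true, true, true, true };
	int shift = 1;	/* two CPUs share each queue, for illustration */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!possible[cpu])
			continue;
		int ideal = cpu >> shift;

		printf("CPU %d: ideal queue %d, enqueues on CPU %d's list\n",
		       cpu, ideal, next_possible(possible, ideal));
	}
	return 0;
}
```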
ce9b1c66 PM |
385 | // RCU callback function for rcu_barrier_tasks_generic(). |
386 | static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) | |
387 | { | |
388 | struct rcu_tasks *rtp; | |
389 | struct rcu_tasks_percpu *rtpcp; | |
390 | ||
391 | rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); | |
392 | rtp = rtpcp->rtpp; | |
393 | if (atomic_dec_and_test(&rtp->barrier_q_count)) | |
394 | complete(&rtp->barrier_q_completion); | |
395 | } | |
396 | ||
397 | // Wait for all in-flight callbacks for the specified RCU Tasks flavor. | |
398 | // Operates in a manner similar to rcu_barrier(). | |
399 | static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp) | |
400 | { | |
401 | int cpu; | |
402 | unsigned long flags; | |
403 | struct rcu_tasks_percpu *rtpcp; | |
404 | unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); | |
405 | ||
406 | mutex_lock(&rtp->barrier_q_mutex); | |
407 | if (rcu_seq_done(&rtp->barrier_q_seq, s)) { | |
408 | smp_mb(); | |
409 | mutex_unlock(&rtp->barrier_q_mutex); | |
410 | return; | |
411 | } | |
412 | rcu_seq_start(&rtp->barrier_q_seq); | |
413 | init_completion(&rtp->barrier_q_completion); | |
414 | atomic_set(&rtp->barrier_q_count, 2); | |
415 | for_each_possible_cpu(cpu) { | |
2cee0789 | 416 | if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) |
ce9b1c66 PM |
417 | break; |
418 | rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
419 | rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; | |
420 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
421 | if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) | |
422 | atomic_inc(&rtp->barrier_q_count); | |
423 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
424 | } | |
425 | if (atomic_sub_and_test(2, &rtp->barrier_q_count)) | |
426 | complete(&rtp->barrier_q_completion); | |
427 | wait_for_completion(&rtp->barrier_q_completion); | |
428 | rcu_seq_end(&rtp->barrier_q_seq); | |
429 | mutex_unlock(&rtp->barrier_q_mutex); | |
430 | } | |
431 | ||
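
The sequence-counter check at the top of rcu_barrier_tasks_generic() lets concurrent callers share one barrier pass: snapshot the sequence before taking the mutex, and if the sequence has advanced past that snapshot once the mutex is held, another caller has already done the required work. A deliberately simplified user-space model of the idiom (the kernel packs the in-progress state and the count into the single `barrier_q_seq` word via its rcu_seq helpers; the helper names below are local stand-ins):

```c
#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for barrier_q_seq: completed passes plus an in-progress flag. */
struct barrier_seq {
	unsigned long completed;
	bool in_progress;
};

/* Which completed-count guarantees a full pass starting after this call? */
static unsigned long seq_snap(const struct barrier_seq *s)
{
	return s->completed + (s->in_progress ? 2 : 1);
}

/* Has the pass identified by the snapshot finished yet? */
static bool seq_done(const struct barrier_seq *s, unsigned long snap)
{
	return s->completed >= snap;
}

int main(void)
{
	struct barrier_seq seq = { .completed = 0, .in_progress = false };
	unsigned long snap_a = seq_snap(&seq);	/* caller A arrives first */
	unsigned long snap_b;

	seq.in_progress = true;			/* A starts the barrier pass */
	snap_b = seq_snap(&seq);		/* caller B arrives mid-pass */
	seq.in_progress = false;		/* A's pass completes */
	seq.completed++;

	printf("A satisfied: %d, B satisfied: %d\n",
	       seq_done(&seq, snap_a), seq_done(&seq, snap_b));
	return 0;
}
```

A's pass satisfies A but not B, because callbacks queued after A's pass began might not be covered, so B goes on to run a pass of its own.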
4d1114c0 PM |
432 | // Advance callbacks and indicate whether either a grace period or |
433 | // callback invocation is needed. | |
434 | static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) | |
435 | { | |
436 | int cpu; | |
e62d8ae4 | 437 | int dequeue_limit; |
4d1114c0 | 438 | unsigned long flags; |
a4fcfbee | 439 | bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); |
fd796e41 PM |
440 | long n; |
441 | long ncbs = 0; | |
442 | long ncbsnz = 0; | |
4d1114c0 PM |
443 | int needgpcb = 0; |
444 | ||
e62d8ae4 PM |
445 | dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); |
446 | for (cpu = 0; cpu < dequeue_limit; cpu++) { | |
4d1114c0 PM |
447 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
448 | ||
449 | /* Advance and accelerate any new callbacks. */ | |
fd796e41 | 450 | if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) |
4d1114c0 PM |
451 | continue; |
452 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
fd796e41 PM |
453 | // Should we shrink down to a single callback queue? |
454 | n = rcu_segcblist_n_cbs(&rtpcp->cblist); | |
455 | if (n) { | |
456 | ncbs += n; | |
457 | if (cpu > 0) | |
458 | ncbsnz += n; | |
459 | } | |
4d1114c0 PM |
460 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
461 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
d119357d PM |
462 | if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { |
463 | if (rtp->lazy_jiffies) | |
464 | rtpcp->urgent_gp--; | |
4d1114c0 | 465 | needgpcb |= 0x3; |
d119357d PM |
466 | } else if (rcu_segcblist_empty(&rtpcp->cblist)) { |
467 | rtpcp->urgent_gp = 0; | |
468 | } | |
469 | if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) | |
4d1114c0 PM |
470 | needgpcb |= 0x1; |
471 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
472 | } | |
fd796e41 PM |
473 | |
474 | // Shrink down to a single callback queue if appropriate. | |
475 | // This is done in two stages: (1) If there are no more than | |
476 | // rcu_task_collapse_lim callbacks on CPU 0 and none on any other | |
477 | // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, | |
478 | // if there has not been an increase in callbacks, limit dequeuing | |
479 | // to CPU 0. Note the matching RCU read-side critical section in | |
480 | // call_rcu_tasks_generic(). | |
481 | if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { | |
482 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); | |
483 | if (rtp->percpu_enqueue_lim > 1) { | |
2bcd18e0 | 484 | WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids)); |
fd796e41 PM |
485 | smp_store_release(&rtp->percpu_enqueue_lim, 1); |
486 | rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); | |
a4fcfbee | 487 | gpdone = false; |
fd796e41 PM |
488 | pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); |
489 | } | |
490 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); | |
491 | } | |
a4fcfbee | 492 | if (rcu_task_cb_adjust && !ncbsnz && gpdone) { |
fd796e41 PM |
493 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); |
494 | if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { | |
495 | WRITE_ONCE(rtp->percpu_dequeue_lim, 1); | |
496 | pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); | |
497 | } | |
a4fcfbee Z |
498 | if (rtp->percpu_dequeue_lim == 1) { |
499 | for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) { | |
500 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
4cf0585c | 501 | |
a4fcfbee Z |
502 | WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); |
503 | } | |
4cf0585c | 504 | } |
fd796e41 PM |
505 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); |
506 | } | |
507 | ||
4d1114c0 PM |
508 | return needgpcb; |
509 | } | |
510 | ||
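
The returned value is a two-bit summary: 0x1 means some callbacks are already ready to invoke, and 0x2 means pending callbacks still need a grace period; rcu_tasks_one_gp() below keys off these bits. A tiny sketch of that encoding, with mnemonic macro names invented here for illustration (the kernel itself uses the bare 0x1/0x2 values):

```c
#include <stdio.h>

#define NEED_CB_INVOKE	0x1	/* callbacks ready to be invoked */
#define NEED_GP		0x2	/* pending callbacks need a grace period */

static void describe(int needgpcb)
{
	printf("needgpcb=0x%x:%s%s%s\n", needgpcb,
	       needgpcb & NEED_GP ? " wait for a grace period," : "",
	       needgpcb & NEED_CB_INVOKE ? " invoke ready callbacks" : "",
	       needgpcb ? "" : " nothing to do");
}

int main(void)
{
	describe(0x0);
	describe(0x1);
	describe(0x3);
	return 0;
}
```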
57881863 | 511 | // Advance callbacks and invoke any that are ready. |
d363f833 | 512 | static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) |
eacd6f04 | 513 | { |
57881863 | 514 | int cpu; |
d363f833 | 515 | int cpunext; |
401b0de3 | 516 | int cpuwq; |
eacd6f04 | 517 | unsigned long flags; |
9b073de1 | 518 | int len; |
9b073de1 | 519 | struct rcu_head *rhp; |
d363f833 PM |
520 | struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); |
521 | struct rcu_tasks_percpu *rtpcp_next; | |
522 | ||
523 | cpu = rtpcp->cpu; | |
524 | cpunext = cpu * 2 + 1; | |
2cee0789 | 525 | if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
d363f833 | 526 | rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); |
401b0de3 PM |
527 | cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND; |
528 | queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); | |
d363f833 | 529 | cpunext++; |
2cee0789 | 530 | if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
d363f833 | 531 | rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext); |
401b0de3 PM |
532 | cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND; |
533 | queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); | |
57881863 | 534 | } |
57881863 | 535 | } |
d363f833 | 536 | |
ab2756ea | 537 | if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu)) |
d363f833 PM |
538 | return; |
539 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
540 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); | |
541 | rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); | |
542 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
543 | len = rcl.len; | |
544 | for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { | |
2cbc482d | 545 | debug_rcu_head_callback(rhp); |
d363f833 PM |
546 | local_bh_disable(); |
547 | rhp->func(rhp); | |
548 | local_bh_enable(); | |
549 | cond_resched(); | |
550 | } | |
551 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
552 | rcu_segcblist_add_len(&rtpcp->cblist, -len); | |
553 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); | |
554 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
555 | } | |
556 | ||
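
Callback invocation fans out across the in-use queues as a binary tree: the worker for queue `cpu` first kicks the workers for queues `2*cpu + 1` and `2*cpu + 2` before draining its own list, so `n` queues are reached in roughly `log2(n)` waves rather than one long sequential scan. A quick user-space sketch of the resulting fan-out (the queue count is arbitrary):

```c
#include <stdio.h>

/* Print the worker fan-out tree for `lim` in-use callback queues. */
static void fanout(int cpu, int lim, int depth)
{
	printf("%*sworker for queue %d\n", 2 * depth, "", cpu);
	for (int next = 2 * cpu + 1; next <= 2 * cpu + 2 && next < lim; next++)
		fanout(next, lim, depth + 1);
}

int main(void)
{
	fanout(0, 7, 0);	/* seven queues in use, starting from queue 0 */
	return 0;
}
```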
557 | // Workqueue flood to advance callbacks and invoke any that are ready. | |
558 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) | |
559 | { | |
560 | struct rcu_tasks *rtp; | |
561 | struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); | |
562 | ||
563 | rtp = rtpcp->rtpp; | |
564 | rcu_tasks_invoke_cbs(rtp, rtpcp); | |
57881863 PM |
565 | } |
566 | ||
d96225fd | 567 | // Wait for one grace period. |
4a8cc433 | 568 | static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) |
57881863 PM |
569 | { |
570 | int needgpcb; | |
d96225fd PM |
571 | |
572 | mutex_lock(&rtp->tasks_gp_mutex); | |
d96225fd PM |
573 | |
574 | // If no callbacks need attention yet, wait until some do.
4a8cc433 PM |
575 | if (unlikely(midboot)) { |
576 | needgpcb = 0x2; | |
577 | } else { | |
9d0cce2b | 578 | mutex_unlock(&rtp->tasks_gp_mutex); |
4a8cc433 PM |
579 | set_tasks_gp_state(rtp, RTGS_WAIT_CBS); |
580 | rcuwait_wait_event(&rtp->cbs_wait, | |
581 | (needgpcb = rcu_tasks_need_gpcb(rtp)), | |
582 | TASK_IDLE); | |
9d0cce2b | 583 | mutex_lock(&rtp->tasks_gp_mutex); |
4a8cc433 | 584 | } |
d96225fd PM |
585 | |
586 | if (needgpcb & 0x2) { | |
587 | // Wait for one grace period. | |
588 | set_tasks_gp_state(rtp, RTGS_WAIT_GP); | |
589 | rtp->gp_start = jiffies; | |
590 | rcu_seq_start(&rtp->tasks_gp_seq); | |
591 | rtp->gp_func(rtp); | |
592 | rcu_seq_end(&rtp->tasks_gp_seq); | |
593 | } | |
594 | ||
595 | // Invoke callbacks. | |
596 | set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); | |
597 | rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); | |
598 | mutex_unlock(&rtp->tasks_gp_mutex); | |
599 | } | |
600 | ||
601 | // RCU-tasks kthread that detects grace periods and invokes callbacks. | |
602 | static int __noreturn rcu_tasks_kthread(void *arg) | |
603 | { | |
d119357d | 604 | int cpu; |
07e10515 | 605 | struct rcu_tasks *rtp = arg; |
eacd6f04 | 606 | |
d119357d PM |
607 | for_each_possible_cpu(cpu) { |
608 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
609 | ||
610 | timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); | |
611 | rtpcp->urgent_gp = 1; | |
612 | } | |
613 | ||
eacd6f04 | 614 | /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ |
04d4e665 | 615 | housekeeping_affine(current, HK_TYPE_RCU); |
d119357d | 616 | smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! |
eacd6f04 PM |
617 | |
618 | /* | |
619 | * Each pass through the following loop makes one check for | |
620 | * newly arrived callbacks, and, if there are some, waits for | |
621 | * one RCU-tasks grace period and then invokes the callbacks. | |
622 | * This loop is terminated by the system going down. ;-) | |
623 | */ | |
624 | for (;;) { | |
d96225fd PM |
625 | // Wait for one grace period and invoke any callbacks |
626 | // that are ready. | |
4a8cc433 | 627 | rcu_tasks_one_gp(rtp, false); |
57881863 | 628 | |
d96225fd | 629 | // Paranoid sleep to keep this from entering a tight loop. |
4fe192df | 630 | schedule_timeout_idle(rtp->gp_sleep); |
eacd6f04 PM |
631 | } |
632 | } | |
633 | ||
68cb4720 PM |
634 | // Wait for a grace period for the specified flavor of Tasks RCU. |
635 | static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) | |
636 | { | |
637 | /* Complain if the scheduler has not started. */ | |
ea5c8987 Z |
638 | if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, |
639 | "synchronize_%s() called too soon", rtp->name)) | |
640 | return; | |
68cb4720 | 641 | |
4a8cc433 PM |
642 | // If the grace-period kthread is running, use it. |
643 | if (READ_ONCE(rtp->kthread_ptr)) { | |
c342b42f | 644 | wait_rcu_gp_state(rtp->wait_state, rtp->call_func); |
4a8cc433 PM |
645 | return; |
646 | } | |
647 | rcu_tasks_one_gp(rtp, true); | |
68cb4720 PM |
648 | } |
649 | ||
1b04fa99 | 650 | /* Spawn RCU-tasks grace-period kthread. */ |
5873b8a9 | 651 | static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) |
eacd6f04 PM |
652 | { |
653 | struct task_struct *t; | |
654 | ||
c97d12a6 PM |
655 | t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); |
656 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) | |
5873b8a9 | 657 | return; |
eacd6f04 | 658 | smp_mb(); /* Ensure others see full kthread. */ |
eacd6f04 | 659 | } |
eacd6f04 | 660 | |
eacd6f04 PM |
661 | #ifndef CONFIG_TINY_RCU |
662 | ||
663 | /* | |
664 | * Print any non-default Tasks RCU settings. | |
665 | */ | |
666 | static void __init rcu_tasks_bootup_oddness(void) | |
667 | { | |
d5f177d3 | 668 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
f2539003 PM |
669 | int rtsimc; |
670 | ||
eacd6f04 PM |
671 | if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) |
672 | pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); | |
f2539003 PM |
673 | rtsimc = clamp(rcu_task_stall_info_mult, 1, 10); |
674 | if (rtsimc != rcu_task_stall_info_mult) { | |
675 | pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc); | |
676 | rcu_task_stall_info_mult = rtsimc; | |
677 | } | |
d5f177d3 PM |
678 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
679 | #ifdef CONFIG_TASKS_RCU | |
680 | pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); | |
eacd6f04 | 681 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
c84aad76 PM |
682 | #ifdef CONFIG_TASKS_RUDE_RCU |
683 | pr_info("\tRude variant of Tasks RCU enabled.\n"); | |
684 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ | |
d5f177d3 PM |
685 | #ifdef CONFIG_TASKS_TRACE_RCU |
686 | pr_info("\tTracing variant of Tasks RCU enabled.\n"); | |
687 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ | |
eacd6f04 PM |
688 | } |
689 | ||
690 | #endif /* #ifndef CONFIG_TINY_RCU */ | |
5873b8a9 | 691 | |
8344496e | 692 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
693 | /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ |
694 | static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) | |
695 | { | |
10b3742f PM |
696 | int cpu; |
697 | bool havecbs = false; | |
d119357d PM |
698 | bool haveurgent = false; |
699 | bool haveurgentcbs = false; | |
10b3742f PM |
700 | |
701 | for_each_possible_cpu(cpu) { | |
702 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); | |
703 | ||
d119357d | 704 | if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) |
10b3742f | 705 | havecbs = true; |
d119357d PM |
706 | if (data_race(rtpcp->urgent_gp)) |
707 | haveurgent = true; | |
708 | if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) | |
709 | haveurgentcbs = true; | |
710 | if (havecbs && haveurgent && haveurgentcbs) | |
10b3742f | 711 | break; |
10b3742f | 712 | } |
d119357d | 713 | pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n", |
e21408ce | 714 | rtp->kname, |
7e0669c3 | 715 | tasks_gp_state_getname(rtp), data_race(rtp->gp_state), |
af051ca4 | 716 | jiffies - data_race(rtp->gp_jiffies), |
b14fb4fb | 717 | data_race(rcu_seq_current(&rtp->tasks_gp_seq)), |
7e0669c3 | 718 | data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), |
e21408ce | 719 | ".k"[!!data_race(rtp->kthread_ptr)], |
10b3742f | 720 | ".C"[havecbs], |
d119357d PM |
721 | ".u"[haveurgent], |
722 | ".U"[haveurgentcbs], | |
723 | rtp->lazy_jiffies, | |
e21408ce PM |
724 | s); |
725 | } | |
27c0f144 | 726 | #endif // #ifndef CONFIG_TINY_RCU |
e21408ce | 727 | |
25246fc8 PM |
728 | static void exit_tasks_rcu_finish_trace(struct task_struct *t); |
729 | ||
730 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) | |
5873b8a9 | 731 | |
d01aa263 PM |
732 | //////////////////////////////////////////////////////////////////////// |
733 | // | |
734 | // Shared code between task-list-scanning variants of Tasks RCU. | |
735 | ||
736 | /* Wait for one RCU-tasks grace period. */ | |
737 | static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) | |
738 | { | |
f2539003 | 739 | struct task_struct *g; |
d01aa263 | 740 | int fract; |
f2539003 PM |
741 | LIST_HEAD(holdouts); |
742 | unsigned long j; | |
743 | unsigned long lastinfo; | |
744 | unsigned long lastreport; | |
745 | bool reported = false; | |
746 | int rtsi; | |
747 | struct task_struct *t; | |
d01aa263 | 748 | |
af051ca4 | 749 | set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); |
7460ade1 | 750 | rtp->pregp_func(&holdouts); |
d01aa263 PM |
751 | |
752 | /* | |
753 | * There were callbacks, so we need to wait for an RCU-tasks | |
754 | * grace period. Start off by scanning the task list for tasks | |
755 | * that are not already voluntarily blocked. Mark these tasks | |
756 | * and make a list of them in holdouts. | |
757 | */ | |
af051ca4 | 758 | set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); |
1a4a8153 PM |
759 | if (rtp->pertask_func) { |
760 | rcu_read_lock(); | |
761 | for_each_process_thread(g, t) | |
762 | rtp->pertask_func(t, &holdouts); | |
763 | rcu_read_unlock(); | |
764 | } | |
d01aa263 | 765 | |
af051ca4 | 766 | set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); |
9796e1ae | 767 | rtp->postscan_func(&holdouts); |
d01aa263 PM |
768 | |
769 | /* | |
770 | * Each pass through the following loop scans the list of holdout | |
771 | * tasks, removing any that are no longer holdouts. When the list | |
772 | * is empty, we are done. | |
773 | */ | |
774 | lastreport = jiffies; | |
f2539003 PM |
775 | lastinfo = lastreport; |
776 | rtsi = READ_ONCE(rcu_task_stall_info); | |
d01aa263 | 777 | |
2393a613 PM |
778 | // Start off with initial wait and slowly back off to 1 HZ wait. |
779 | fract = rtp->init_fract; | |
d01aa263 | 780 | |
77dc1741 | 781 | while (!list_empty(&holdouts)) { |
777570d9 | 782 | ktime_t exp; |
d01aa263 PM |
783 | bool firstreport; |
784 | bool needreport; | |
785 | int rtst; | |
786 | ||
f2539003 | 787 | // Slowly back off waiting for holdouts |
af051ca4 | 788 | set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); |
bddf7122 PM |
789 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { |
790 | schedule_timeout_idle(fract); | |
791 | } else { | |
792 | exp = jiffies_to_nsecs(fract); | |
793 | __set_current_state(TASK_IDLE); | |
794 | schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD); | |
795 | } | |
d01aa263 | 796 | |
75dc2da5 PM |
797 | if (fract < HZ) |
798 | fract++; | |
d01aa263 PM |
799 | |
800 | rtst = READ_ONCE(rcu_task_stall_timeout); | |
801 | needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); | |
f2539003 | 802 | if (needreport) { |
d01aa263 | 803 | lastreport = jiffies; |
f2539003 PM |
804 | reported = true; |
805 | } | |
d01aa263 PM |
806 | firstreport = true; |
807 | WARN_ON(signal_pending(current)); | |
af051ca4 | 808 | set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); |
d01aa263 | 809 | rtp->holdouts_func(&holdouts, needreport, &firstreport); |
f2539003 PM |
810 | |
811 | // Print pre-stall informational messages if needed. | |
812 | j = jiffies; | |
813 | if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) { | |
814 | lastinfo = j; | |
815 | rtsi = rtsi * rcu_task_stall_info_mult; | |
df83fff7 | 816 | pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n", |
f2539003 PM |
817 | __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); |
818 | } | |
d01aa263 PM |
819 | } |
820 | ||
af051ca4 PM |
821 | set_tasks_gp_state(rtp, RTGS_POST_GP); |
822 | rtp->postgp_func(rtp); | |
d01aa263 PM |
823 | } |
824 | ||
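
The holdout-wait loop above backs off gradually: it starts by sleeping `init_fract` jiffies, lengthens the sleep by one jiffy per pass up to HZ, and prints informational messages at intervals that grow by `rcu_task_stall_info_mult` until a full stall is reported. A rough user-space simulation of that pacing, assuming HZ is 1000 and the default module-parameter values shown earlier in this file:

```c
#include <stdio.h>

#define HZ 1000	/* assumed jiffy rate for this sketch */

int main(void)
{
	int fract = HZ / 10;			/* init_fract used by RCU Tasks */
	long rtsi = HZ * 10;			/* rcu_task_stall_info default */
	int mult = 3;				/* rcu_task_stall_info_mult default */
	long j = 0, lastinfo = 0;

	/* Pretend 200 passes elapse with at least one holdout remaining. */
	for (int pass = 0; pass < 200; pass++) {
		j += fract;			/* sleep this long */
		if (fract < HZ)
			fract++;		/* back off a little further */
		if (j > lastinfo + rtsi) {	/* time for a pre-stall message? */
			printf("pre-stall info after %ld jiffies\n", j);
			lastinfo = j;
			rtsi *= mult;		/* next message comes later */
		}
	}
	return 0;
}
```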
25246fc8 PM |
825 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
826 | ||
827 | #ifdef CONFIG_TASKS_RCU | |
828 | ||
5873b8a9 PM |
829 | //////////////////////////////////////////////////////////////////////// |
830 | // | |
831 | // Simple variant of RCU whose quiescent states are voluntary context | |
8af9e2c7 | 832 | // switches, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
5873b8a9 PM |
833 | // As such, grace periods can take one good long time. There are no |
834 | // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() | |
835 | // because this implementation is intended to get the system into a safe | |
836 | // state for some of the manipulations involved in tracing and the like. | |
837 | // Finally, this implementation does not support high call_rcu_tasks() | |
838 | // rates from multiple CPUs. If this is required, per-CPU callback lists | |
839 | // will be needed. | |
06a3ec92 PM |
840 | // |
841 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
842 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() | |
843 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
844 | // invokes these functions in this order: | |
845 | // | |
846 | // rcu_tasks_pregp_step(): | |
847 | // Invokes synchronize_rcu() in order to wait for all in-flight | |
848 | // t->on_rq and t->nvcsw transitions to complete. This works because | |
849 | // all such transitions are carried out with interrupts disabled. | |
850 | // rcu_tasks_pertask(), invoked on every non-idle task: | |
851 | // For every runnable non-idle task other than the current one, use | |
852 | // get_task_struct() to pin down that task, snapshot that task's | |
853 | // number of voluntary context switches, and add that task to the | |
854 | // holdout list. | |
855 | // rcu_tasks_postscan(): | |
1612160b PM |
856 | // Gather per-CPU lists of tasks in do_exit() to ensure that all |
857 | // tasks that were in the process of exiting (and which thus might | |
858 | // not know to synchronize with this RCU Tasks grace period) have | |
859 | // completed exiting. The synchronize_rcu() in rcu_tasks_postgp() | |
860 | // will take care of any tasks stuck in the non-preemptible region | |
861 | // of do_exit() following its call to exit_tasks_rcu_stop(). | |
06a3ec92 PM |
862 | // check_all_holdout_tasks(), repeatedly until holdout list is empty: |
863 | // Scans the holdout list, attempting to identify a quiescent state | |
864 | // for each task on the list. If there is a quiescent state, the | |
865 | // corresponding task is removed from the holdout list. | |
866 | // rcu_tasks_postgp(): | |
867 | // Invokes synchronize_rcu() in order to ensure that all prior | |
868 | // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks | |
869 | // to have happened before the end of this RCU Tasks grace period. | |
870 | // Again, this works because all such transitions are carried out | |
871 | // with interrupts disabled. | |
872 | // | |
873 | // For each exiting task, the exit_tasks_rcu_start() and | |
1612160b PM |
874 | // exit_tasks_rcu_finish() functions add and remove, respectively, the |
875 | // current task to a per-CPU list of tasks that rcu_tasks_postscan() must | |
876 | // wait on. This is necessary because rcu_tasks_postscan() must wait on | |
877 | // tasks that have already been removed from the global list of tasks. | |
06a3ec92 | 878 | // |
381a4f3b PM |
879 | // Pre-grace-period update-side code is ordered before the grace period
880 | // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code | |
881 | // is ordered before the grace period via the synchronize_rcu() call in
882 | // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt | |
06a3ec92 | 883 | // disabling. |
5873b8a9 | 884 | |
e4fe5dd6 | 885 | /* Pre-grace-period preparation. */ |
7460ade1 | 886 | static void rcu_tasks_pregp_step(struct list_head *hop) |
e4fe5dd6 PM |
887 | { |
888 | /* | |
889 | * Wait for all pre-existing t->on_rq and t->nvcsw transitions | |
890 | * to complete. Invoking synchronize_rcu() suffices because all | |
891 | * these transitions occur with interrupts disabled. Without this | |
892 | * synchronize_rcu(), a read-side critical section that started | |
893 | * before the grace period might be incorrectly seen as having | |
894 | * started after the grace period. | |
895 | * | |
896 | * This synchronize_rcu() also dispenses with the need for a | |
897 | * memory barrier on the first store to t->rcu_tasks_holdout, | |
898 | * as it forces the store to happen after the beginning of the | |
899 | * grace period. | |
900 | */ | |
901 | synchronize_rcu(); | |
902 | } | |
903 | ||
9715ed50 FW |
904 | /* Check for quiescent states since the pregp's synchronize_rcu() */ |
905 | static bool rcu_tasks_is_holdout(struct task_struct *t) | |
906 | { | |
907 | int cpu; | |
908 | ||
909 | /* Has the task been seen voluntarily sleeping? */ | |
910 | if (!READ_ONCE(t->on_rq)) | |
911 | return false; | |
912 | ||
913 | /* | |
914 | * Idle tasks (or idle injection) within the idle loop are RCU-tasks | |
915 | * quiescent states. But CPU boot code performed by the idle task | |
916 | * isn't a quiescent state. | |
917 | */ | |
918 | if (is_idle_task(t)) | |
919 | return false; | |
920 | ||
921 | cpu = task_cpu(t); | |
922 | ||
923 | /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ | |
924 | if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) | |
925 | return false; | |
926 | ||
927 | return true; | |
928 | } | |
929 | ||
e4fe5dd6 PM |
930 | /* Per-task initial processing. */ |
931 | static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) | |
932 | { | |
9715ed50 | 933 | if (t != current && rcu_tasks_is_holdout(t)) { |
e4fe5dd6 PM |
934 | get_task_struct(t); |
935 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); | |
936 | WRITE_ONCE(t->rcu_tasks_holdout, true); | |
937 | list_add(&t->rcu_tasks_holdout_list, hop); | |
938 | } | |
939 | } | |
940 | ||
1612160b PM |
941 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
942 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); | |
943 | ||
e4fe5dd6 | 944 | /* Processing between scanning the tasklist and draining the holdout list. */
04a3c5aa | 945 | static void rcu_tasks_postscan(struct list_head *hop) |
e4fe5dd6 | 946 | { |
1612160b | 947 | int cpu; |
a4533cc0 NU |
948 | int rtsi = READ_ONCE(rcu_task_stall_info); |
949 | ||
950 | if (!IS_ENABLED(CONFIG_TINY_RCU)) { | |
951 | tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; | |
952 | add_timer(&tasks_rcu_exit_srcu_stall_timer); | |
953 | } | |
954 | ||
e4fe5dd6 | 955 | /* |
e4e1e808 FW |
956 | * Exiting tasks may escape the tasklist scan. Those are vulnerable |
957 | * until their final schedule() with TASK_DEAD state. To cope with | |
958 | * this, divide the fragile exit path part in two intersecting | |
959 | * read side critical sections: | |
960 | * | |
1612160b PM |
961 | * 1) A task_struct list addition before calling exit_notify(), |
962 | * which may remove the task from the tasklist, with the | |
963 | * removal after the final preempt_disable() call in do_exit(). | |
e4e1e808 FW |
964 | * |
965 | * 2) An _RCU_ read side starting with the final preempt_disable() | |
966 | * call in do_exit() and ending with the final call to schedule() | |
967 | * with TASK_DEAD state. | |
968 | * | |
969 | * This handles the part 1). And postgp will handle part 2) with a | |
970 | * call to synchronize_rcu(). | |
e4fe5dd6 | 971 | */ |
1612160b PM |
972 | |
973 | for_each_possible_cpu(cpu) { | |
0bb11a37 | 974 | unsigned long j = jiffies + 1; |
1612160b PM |
975 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); |
976 | struct task_struct *t; | |
0bb11a37 PM |
977 | struct task_struct *t1; |
978 | struct list_head tmp; | |
1612160b PM |
979 | |
980 | raw_spin_lock_irq_rcu_node(rtpcp); | |
0bb11a37 | 981 | list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { |
1612160b PM |
982 | if (list_empty(&t->rcu_tasks_holdout_list)) |
983 | rcu_tasks_pertask(t, hop); | |
0bb11a37 PM |
984 | |
985 | // RT kernels need frequent pauses, otherwise | |
986 | // pause at least once per pair of jiffies. | |
987 | if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j)) | |
988 | continue; | |
989 | ||
990 | // Keep our place in the list while pausing. | |
991 | // Nothing else traverses this list, so adding a | |
992 | // bare list_head is OK. | |
993 | list_add(&tmp, &t->rcu_tasks_exit_list); | |
994 | raw_spin_unlock_irq_rcu_node(rtpcp); | |
995 | cond_resched(); // For CONFIG_PREEMPT=n kernels | |
996 | raw_spin_lock_irq_rcu_node(rtpcp); | |
997 | t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list); | |
998 | list_del(&tmp); | |
999 | j = jiffies + 1; | |
1000 | } | |
1612160b PM |
1001 | raw_spin_unlock_irq_rcu_node(rtpcp); |
1002 | } | |
a4533cc0 NU |
1003 | |
1004 | if (!IS_ENABLED(CONFIG_TINY_RCU)) | |
1005 | del_timer_sync(&tasks_rcu_exit_srcu_stall_timer); | |
e4fe5dd6 PM |
1006 | } |
1007 | ||
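
The `tmp` list_head trick above lets the exit-list scan drop the lock mid-walk without losing its place: a placeholder node is spliced in right after the current task, the lock is released and reacquired, and the scan resumes at whatever now follows the placeholder. A self-contained user-space model of the idiom, with a minimal list implementation standing in for the kernel's `<linux/list.h>` (no locking shown, since nothing else traverses this list):

```c
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct task { int id; struct list_head node; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head tmp;
	struct task tasks[4];

	for (int i = 3; i >= 0; i--) {	/* build head -> 0 -> 1 -> 2 -> 3 */
		tasks[i].id = i;
		list_add(&tasks[i].node, &head);
	}

	for (struct list_head *p = head.next; p != &head; ) {
		struct task *t = (struct task *)((char *)p - offsetof(struct task, node));

		printf("scanning task %d\n", t->id);
		list_add(&tmp, p);	/* placeholder keeps our position */
		/* ...the lock would be dropped and reacquired here... */
		p = tmp.next;		/* resume with whatever follows the placeholder */
		list_del(&tmp);
	}
	return 0;
}
```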
5873b8a9 PM |
1008 | /* See if tasks are still holding out, complain if so. */ |
1009 | static void check_holdout_task(struct task_struct *t, | |
1010 | bool needreport, bool *firstreport) | |
1011 | { | |
1012 | int cpu; | |
1013 | ||
1014 | if (!READ_ONCE(t->rcu_tasks_holdout) || | |
1015 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || | |
9715ed50 | 1016 | !rcu_tasks_is_holdout(t) || |
5873b8a9 | 1017 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && |
18966f7b | 1018 | !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) { |
5873b8a9 PM |
1019 | WRITE_ONCE(t->rcu_tasks_holdout, false); |
1020 | list_del_init(&t->rcu_tasks_holdout_list); | |
1021 | put_task_struct(t); | |
1022 | return; | |
1023 | } | |
1024 | rcu_request_urgent_qs_task(t); | |
1025 | if (!needreport) | |
1026 | return; | |
1027 | if (*firstreport) { | |
1028 | pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); | |
1029 | *firstreport = false; | |
1030 | } | |
1031 | cpu = task_cpu(t); | |
1032 | pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", | |
1033 | t, ".I"[is_idle_task(t)], | |
1034 | "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], | |
1035 | t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, | |
18966f7b | 1036 | data_race(t->rcu_tasks_idle_cpu), cpu); |
5873b8a9 PM |
1037 | sched_show_task(t); |
1038 | } | |
1039 | ||
e4fe5dd6 PM |
1040 | /* Scan the holdout lists for tasks no longer holding out. */ |
1041 | static void check_all_holdout_tasks(struct list_head *hop, | |
1042 | bool needreport, bool *firstreport) | |
1043 | { | |
1044 | struct task_struct *t, *t1; | |
1045 | ||
1046 | list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { | |
1047 | check_holdout_task(t, needreport, firstreport); | |
1048 | cond_resched(); | |
1049 | } | |
1050 | } | |
1051 | ||
1052 | /* Finish off the Tasks-RCU grace period. */ | |
af051ca4 | 1053 | static void rcu_tasks_postgp(struct rcu_tasks *rtp) |
e4fe5dd6 PM |
1054 | { |
1055 | /* | |
1056 | * Because ->on_rq and ->nvcsw are not guaranteed to have full
1057 | * memory barriers prior to them in the schedule() path, memory | |
1058 | * reordering on other CPUs could cause their RCU-tasks read-side | |
1059 | * critical sections to extend past the end of the grace period. | |
1060 | * However, because these ->nvcsw updates are carried out with | |
1061 | * interrupts disabled, we can use synchronize_rcu() to force the | |
1062 | * needed ordering on all such CPUs. | |
1063 | * | |
1064 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout | |
1065 | * accesses to be within the grace period, avoiding the need for | |
1066 | * memory barriers for ->rcu_tasks_holdout accesses. | |
1067 | * | |
1068 | * In addition, this synchronize_rcu() waits for exiting tasks | |
1069 | * to complete their final preempt_disable() region of execution, | |
e4e1e808 FW |
1070 | * enforcing the whole region before tasklist removal until |
1071 | * the final schedule() with TASK_DEAD state to be an RCU TASKS | |
1072 | * read side critical section. | |
e4fe5dd6 PM |
1073 | */ |
1074 | synchronize_rcu(); | |
1075 | } | |
1076 | ||
a4533cc0 NU |
1077 | static void tasks_rcu_exit_srcu_stall(struct timer_list *unused) |
1078 | { | |
1079 | #ifndef CONFIG_TINY_RCU | |
1080 | int rtsi; | |
1081 | ||
1082 | rtsi = READ_ONCE(rcu_task_stall_info); | |
1083 | pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n", | |
1084 | __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq, | |
1085 | tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies); | |
1086 | pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n"); | |
1087 | tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; | |
1088 | add_timer(&tasks_rcu_exit_srcu_stall_timer); | |
1089 | #endif // #ifndef CONFIG_TINY_RCU | |
1090 | } | |
1091 | ||
5873b8a9 PM |
1092 | /** |
1093 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1094 | * @rhp: structure to be used for queueing the RCU updates. | |
1095 | * @func: actual callback function to be invoked after the grace period | |
1096 | * | |
1097 | * The callback function will be invoked some time after a full grace | |
1098 | * period elapses, in other words after all currently executing RCU | |
1099 | * read-side critical sections have completed. call_rcu_tasks() assumes | |
1100 | * that the read-side critical sections end at a voluntary context | |
8af9e2c7 | 1101 | * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, |
5873b8a9 PM |
1102 | * or transition to usermode execution. As such, there are no read-side |
1103 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
1104 | * this primitive is intended to determine that all tasks have passed | |
a616aec9 | 1105 | * through a safe state, not so much for data-structure synchronization. |
5873b8a9 PM |
1106 | * |
1107 | * See the description of call_rcu() for more detailed information on | |
1108 | * memory ordering guarantees. | |
1109 | */ | |
1110 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) | |
1111 | { | |
1112 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); | |
1113 | } | |
1114 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | |
1115 | ||
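
A typical caller embeds the rcu_head in the object being retired and frees that object from the callback. A hedged kernel-module-style sketch; the structure and function names are invented for illustration, and only call_rcu_tasks() itself is the real API:

```c
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object whose code/data might still be in use by running tasks. */
struct old_trampoline {
	void *text;
	struct rcu_head rh;
};

/* Runs only after every task has passed through a Tasks-RCU quiescent state. */
static void old_trampoline_free(struct rcu_head *rhp)
{
	struct old_trampoline *otp = container_of(rhp, struct old_trampoline, rh);

	kfree(otp);
}

static void retire_trampoline(struct old_trampoline *otp)
{
	/* Asynchronous: returns immediately, otp is freed after the grace period. */
	call_rcu_tasks(&otp->rh, old_trampoline_free);
}
```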
1116 | /** | |
1117 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. | |
1118 | * | |
1119 | * Control will return to the caller some time after a full rcu-tasks | |
1120 | * grace period has elapsed, in other words after all currently | |
1121 | * executing rcu-tasks read-side critical sections have completed. These
1122 | * read-side critical sections are delimited by calls to schedule(), | |
1123 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls | |
1124 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). | |
1125 | * | |
1126 | * This is a very specialized primitive, intended only for a few uses in | |
1127 | * tracing and other situations requiring manipulation of function | |
1128 | * preambles and profiling hooks. The synchronize_rcu_tasks() function | |
1129 | * is not (yet) intended for heavy use from multiple CPUs. | |
1130 | * | |
1131 | * See the description of synchronize_rcu() for more detailed information | |
1132 | * on memory ordering guarantees. | |
1133 | */ | |
1134 | void synchronize_rcu_tasks(void) | |
1135 | { | |
1136 | synchronize_rcu_tasks_generic(&rcu_tasks); | |
1137 | } | |
1138 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); | |
1139 | ||
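
The synchronous form is the one tracing-style users typically rely on when tearing down a trampoline: detach it so no new tasks can enter, wait for a Tasks-RCU grace period so any task already executing in it has moved on, then free the memory. A hedged sketch of that pattern; the detach helper is invented for illustration, and only synchronize_rcu_tasks() itself is the real API:

```c
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>

void my_detach_trampoline(void *tramp);	/* invented helper for this sketch */

/* Hypothetical teardown of a dynamically allocated code trampoline. */
static void my_trampoline_teardown(void *tramp)
{
	my_detach_trampoline(tramp);	/* no new callers after this point */

	/*
	 * Wait until every task has voluntarily switched, gone idle, or run
	 * in userspace, so none can still be executing inside tramp.
	 */
	synchronize_rcu_tasks();

	vfree(tramp);			/* now safe to release the memory */
}
```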
1140 | /** | |
1141 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. | |
1142 | * | |
1143 | * Although the current implementation is guaranteed to wait, it is not | |
1144 | * obligated to, for example, if there are no pending callbacks. | |
1145 | */ | |
1146 | void rcu_barrier_tasks(void) | |
1147 | { | |
ce9b1c66 | 1148 | rcu_barrier_tasks_generic(&rcu_tasks); |
5873b8a9 PM |
1149 | } |
1150 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); | |
1151 | ||
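
rcu_barrier_tasks() waits for previously queued callbacks to finish running, not for readers, so its usual role is cleanup on module unload: make sure no call_rcu_tasks() callback that might reference module code or data is still pending before the module goes away. A brief sketch, assuming the module queued Tasks-RCU callbacks elsewhere (module boilerplate only):

```c
#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit my_module_exit(void)
{
	/*
	 * Any callbacks this module queued with call_rcu_tasks() must have
	 * finished executing before the module's code and data are freed.
	 */
	rcu_barrier_tasks();
}
module_exit(my_module_exit);
MODULE_LICENSE("GPL");
```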
0325e8a1 | 1152 | static int rcu_tasks_lazy_ms = -1; |
450d461a PM |
1153 | module_param(rcu_tasks_lazy_ms, int, 0444); |
1154 | ||
5873b8a9 PM |
1155 | static int __init rcu_spawn_tasks_kthread(void) |
1156 | { | |
4fe192df | 1157 | rcu_tasks.gp_sleep = HZ / 10; |
75dc2da5 | 1158 | rcu_tasks.init_fract = HZ / 10; |
450d461a PM |
1159 | if (rcu_tasks_lazy_ms >= 0) |
1160 | rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms); | |
e4fe5dd6 PM |
1161 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
1162 | rcu_tasks.pertask_func = rcu_tasks_pertask; | |
1163 | rcu_tasks.postscan_func = rcu_tasks_postscan; | |
1164 | rcu_tasks.holdouts_func = check_all_holdout_tasks; | |
1165 | rcu_tasks.postgp_func = rcu_tasks_postgp; | |
c342b42f | 1166 | rcu_tasks.wait_state = TASK_IDLE; |
5873b8a9 PM |
1167 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
1168 | return 0; | |
1169 | } | |
5873b8a9 | 1170 | |
27c0f144 PM |
1171 | #if !defined(CONFIG_TINY_RCU) |
1172 | void show_rcu_tasks_classic_gp_kthread(void) | |
e21408ce PM |
1173 | { |
1174 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); | |
1175 | } | |
27c0f144 PM |
1176 | EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); |
1177 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 1178 | |
271a8467 PM |
1179 | struct task_struct *get_rcu_tasks_gp_kthread(void) |
1180 | { | |
1181 | return rcu_tasks.kthread_ptr; | |
1182 | } | |
1183 | EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); | |
1184 | ||
dddcddef Z |
1185 | void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq) |
1186 | { | |
1187 | *flags = 0; | |
1188 | *gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq); | |
1189 | } | |
1190 | EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data); | |
1191 | ||
e4e1e808 | 1192 | /* |
6b70399f PM |
1193 | * Protect against tasklist scan blind spot while the task is exiting and |
1194 | * may be removed from the tasklist. Do this by adding the task to yet | |
1195 | * another list. | |
1196 | * | |
1197 | * Note that the task will remove itself from this list, so there is no | |
1198 | * need for get_task_struct(), except in the case where rcu_tasks_pertask() | |
1199 | * adds it to the holdout list, in which case rcu_tasks_pertask() supplies | |
1200 | * the needed get_task_struct(). | |
e4e1e808 | 1201 | */ |
6b70399f | 1202 | void exit_tasks_rcu_start(void) |
25246fc8 | 1203 | { |
6b70399f PM |
1204 | unsigned long flags; |
1205 | struct rcu_tasks_percpu *rtpcp; | |
1206 | struct task_struct *t = current; | |
1207 | ||
1208 | WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); | |
1209 | preempt_disable(); | |
1210 | rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu); | |
1211 | t->rcu_tasks_exit_cpu = smp_processor_id(); | |
1212 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
8db610c3 | 1213 | WARN_ON_ONCE(!rtpcp->rtp_exit_list.next); |
6b70399f PM |
1214 | list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); |
1215 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
1216 | preempt_enable(); | |
25246fc8 PM |
1217 | } |
1218 | ||
e4e1e808 | 1219 | /* |
6b70399f PM |
1220 | * Remove the task from the "yet another list" because do_exit() is now |
1221 | * non-preemptible, allowing synchronize_rcu() to wait beyond this point. | |
e4e1e808 | 1222 | */ |
6b70399f | 1223 | void exit_tasks_rcu_stop(void) |
25246fc8 | 1224 | { |
6b70399f PM |
1225 | unsigned long flags; |
1226 | struct rcu_tasks_percpu *rtpcp; | |
25246fc8 PM |
1227 | struct task_struct *t = current; |
1228 | ||
6b70399f PM |
1229 | WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); |
1230 | rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); | |
1231 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1232 | list_del_init(&t->rcu_tasks_exit_list); | |
1233 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
28319d6d FW |
1234 | } |
1235 | ||
1236 | /* | |
1237 | * Contribute to protect against tasklist scan blind spot while the | |
1238 | * task is exiting and may be removed from the tasklist. See | |
1239 | * corresponding synchronize_srcu() for further details. | |
1240 | */ | |
1241 | void exit_tasks_rcu_finish(void) | |
1242 | { | |
1243 | exit_tasks_rcu_stop(); | |
1244 | exit_tasks_rcu_finish_trace(current); | |
25246fc8 PM |
1245 | } |
1246 | ||
e21408ce | 1247 | #else /* #ifdef CONFIG_TASKS_RCU */ |
25246fc8 | 1248 | void exit_tasks_rcu_start(void) { } |
28319d6d | 1249 | void exit_tasks_rcu_stop(void) { } |
25246fc8 | 1250 | void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } |
e21408ce | 1251 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ |
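/*
 * Illustrative sketch of how the hooks above are intended to pair up in an
 * exiting task; the real call sites live in the exit path (kernel/exit.c)
 * and are not reproduced here.  The ordering shown is only what the
 * comments above require: exit_tasks_rcu_start() before the task can
 * disappear from the tasklist, and exit_tasks_rcu_finish() once the rest of
 * the exit path can no longer block voluntarily.
 */
#if 0	/* Example only, not built. */
static void example_exit_path(void)
{
	exit_tasks_rcu_start();		// Task now on the per-CPU exit list.
	/* ... exit work during which the task may leave the tasklist ... */
	exit_tasks_rcu_finish();	// Leaves the exit list and reports any
					// pending RCU Tasks Trace quiescent state.
}
#endif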
c84aad76 PM |
1252 | |
1253 | #ifdef CONFIG_TASKS_RUDE_RCU | |
1254 | ||
1255 | //////////////////////////////////////////////////////////////////////// | |
1256 | // | |
1257 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of | |
1258 | // passing an empty function to schedule_on_each_cpu(). This approach | |
e4be1f44 PM |
1259 | // provides an asynchronous call_rcu_tasks_rude() API and batching of |
1260 | // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. | |
9fc98e31 PM |
1261 | // This invokes schedule_on_each_cpu() in order to send IPIs far and wide |
1262 | // and induces otherwise unnecessary context switches on all online CPUs, | |
1263 | // whether idle or not. | |
1264 | // | |
1265 | // Callback handling is provided by the rcu_tasks_kthread() function. | |
1266 | // | |
1267 | // Ordering is provided by the scheduler's context-switch code. | |
c84aad76 PM |
1268 | |
1269 | // Empty function to allow workqueues to force a context switch. | |
1270 | static void rcu_tasks_be_rude(struct work_struct *work) | |
1271 | { | |
1272 | } | |
1273 | ||
1274 | // Wait for one rude RCU-tasks grace period. | |
1275 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) | |
1276 | { | |
238dbce3 | 1277 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
c84aad76 PM |
1278 | schedule_on_each_cpu(rcu_tasks_be_rude); |
1279 | } | |
1280 | ||
1281 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); | |
c97d12a6 PM |
1282 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
1283 | "RCU Tasks Rude"); | |
c84aad76 PM |
1284 | |
1285 | /** | |
1286 | * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period | |
1287 | * @rhp: structure to be used for queueing the RCU updates. | |
1288 | * @func: actual callback function to be invoked after the grace period | |
1289 | * | |
1290 | * The callback function will be invoked some time after a full grace | |
1291 | * period elapses, in other words after all currently executing RCU | |
1292 | * read-side critical sections have completed. call_rcu_tasks_rude() | |
1293 | * assumes that the read-side critical sections end at context switch, | |
8af9e2c7 | 1294 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
a6517e9c NU |
1295 | * usermode execution is schedulable). As such, there are no read-side |
1296 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because | |
1297 | * this primitive is intended to determine that all tasks have passed | |
1298 | * through a safe state, not so much for data-structure synchronization. | |
c84aad76 PM |
1299 | * |
1300 | * See the description of call_rcu() for more detailed information on | |
1301 | * memory ordering guarantees. | |
1302 | */ | |
1303 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) | |
1304 | { | |
1305 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude); | |
1306 | } | |
1307 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); | |
1308 | ||
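/*
 * Illustrative sketch: the "reader" for this flavor is simply any region of
 * code that cannot reach a context switch, such as a preemption-disabled
 * hook invocation, so there is nothing to mark on the read side.  The
 * updater uses call_rcu_tasks_rude() to defer the free until every CPU has
 * context-switched.  The example_probe structure and functions are
 * hypothetical; preempt_disable()/preempt_enable(), call_rcu_tasks_rude(),
 * container_of(), and kfree() are the real APIs.
 */
#if 0	/* Example only, not built. */
struct example_probe {
	struct rcu_head rh;
	void (*hook)(void);
};

static void example_probe_reader(struct example_probe *p)
{
	preempt_disable();	// Implicit read-side critical section.
	p->hook();		// Must finish before the next context switch.
	preempt_enable();
}

static void example_probe_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_probe, rh));
}

static void example_probe_retire(struct example_probe *p)
{
	/* ... unlink p so example_probe_reader() can no longer find it ... */
	call_rcu_tasks_rude(&p->rh, example_probe_free);
}
#endif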
1309 | /** | |
1310 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period | |
1311 | * | |
1312 | * Control will return to the caller some time after a rude rcu-tasks | |
1313 | * grace period has elapsed, in other words after all currently | |
1314 | * executing rcu-tasks read-side critical sections have completed. These | |
1315 | * read-side critical sections are delimited by calls to schedule(), | |
a6517e9c NU |
1316 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
1317 | * context), and (in theory, anyway) cond_resched(). | |
c84aad76 PM |
1318 | * |
1319 | * This is a very specialized primitive, intended only for a few uses in | |
1320 | * tracing and other situations requiring manipulation of function preambles | |
1321 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not | |
1322 | * (yet) intended for heavy use from multiple CPUs. | |
1323 | * | |
1324 | * See the description of synchronize_rcu() for more detailed information | |
1325 | * on memory ordering guarantees. | |
1326 | */ | |
1327 | void synchronize_rcu_tasks_rude(void) | |
1328 | { | |
1329 | synchronize_rcu_tasks_generic(&rcu_tasks_rude); | |
1330 | } | |
1331 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); | |
1332 | ||
1333 | /** | |
1334 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. | |
1335 | * | |
1336 | * Although the current implementation is guaranteed to wait, it is not | |
1337 | * obligated to do so, for example, when there are no pending callbacks. | |
1338 | */ | |
1339 | void rcu_barrier_tasks_rude(void) | |
1340 | { | |
ce9b1c66 | 1341 | rcu_barrier_tasks_generic(&rcu_tasks_rude); |
c84aad76 PM |
1342 | } |
1343 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); | |
1344 | ||
450d461a PM |
1345 | int rcu_tasks_rude_lazy_ms = -1; |
1346 | module_param(rcu_tasks_rude_lazy_ms, int, 0444); | |
1347 | ||
c84aad76 PM |
1348 | static int __init rcu_spawn_tasks_rude_kthread(void) |
1349 | { | |
4fe192df | 1350 | rcu_tasks_rude.gp_sleep = HZ / 10; |
450d461a PM |
1351 | if (rcu_tasks_rude_lazy_ms >= 0) |
1352 | rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms); | |
c84aad76 PM |
1353 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); |
1354 | return 0; | |
1355 | } | |
c84aad76 | 1356 | |
27c0f144 PM |
1357 | #if !defined(CONFIG_TINY_RCU) |
1358 | void show_rcu_tasks_rude_gp_kthread(void) | |
e21408ce PM |
1359 | { |
1360 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, ""); | |
1361 | } | |
27c0f144 PM |
1362 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
1363 | #endif // !defined(CONFIG_TINY_RCU) | |
a15ec57c PM |
1364 | |
1365 | struct task_struct *get_rcu_tasks_rude_gp_kthread(void) | |
1366 | { | |
1367 | return rcu_tasks_rude.kthread_ptr; | |
1368 | } | |
1369 | EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); | |
1370 | ||
dddcddef Z |
1371 | void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq) |
1372 | { | |
1373 | *flags = 0; | |
1374 | *gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq); | |
1375 | } | |
1376 | EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data); | |
1377 | ||
27c0f144 | 1378 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ |
d5f177d3 PM |
1379 | |
1380 | //////////////////////////////////////////////////////////////////////// | |
1381 | // | |
1382 | // Tracing variant of Tasks RCU. This variant is designed to be used | |
1383 | // to protect tracing hooks, including those of BPF. This variant | |
1384 | // therefore: | |
1385 | // | |
1386 | // 1. Has explicit read-side markers to allow finite grace periods | |
1387 | // in the face of in-kernel loops for PREEMPT=n builds. | |
1388 | // | |
1389 | // 2. Protects code in the idle loop, exception entry/exit, and | |
1390 | // CPU-hotplug code paths, similar to the capabilities of SRCU. | |
1391 | // | |
c4f113ac | 1392 | // 3. Avoids expensive read-side instructions, having overhead similar |
d5f177d3 PM |
1393 | // to that of Preemptible RCU. |
1394 | // | |
eea3423b PM |
1395 | // There are of course downsides. For example, the grace-period code |
1396 | // can send IPIs to CPUs, even when those CPUs are in the idle loop or | |
1397 | // in nohz_full userspace. If needed, these downsides can be at least | |
1398 | // partially remedied. | |
d5f177d3 PM |
1399 | // |
1400 | // Perhaps most important, this variant of RCU does not affect the vanilla | |
1401 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace | |
1402 | // readers can operate from idle, offline, and exception entry/exit in no | |
1403 | // way allows rcu_preempt and rcu_sched readers to also do so. | |
a434dd10 PM |
1404 | // |
1405 | // The implementation uses rcu_tasks_wait_gp(), which relies on function | |
1406 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() | |
1407 | // function sets these function pointers up so that rcu_tasks_wait_gp() | |
1408 | // invokes these functions in this order: | |
1409 | // | |
1410 | // rcu_tasks_trace_pregp_step(): | |
eea3423b PM |
1411 | // Disables CPU hotplug, adds all currently executing tasks to the |
1412 | // holdout list, then checks the state of all tasks that blocked | |
1413 | // or were preempted within their current RCU Tasks Trace read-side | |
1414 | // critical section, adding them to the holdout list if appropriate. | |
1415 | // Finally, this function re-enables CPU hotplug. | |
1416 | // The ->pertask_func() pointer is NULL, so there is no per-task processing. | |
a434dd10 | 1417 | // rcu_tasks_trace_postscan(): |
eea3423b PM |
1418 | // Invokes synchronize_rcu() to wait for late-stage exiting tasks |
1419 | // to finish exiting. | |
a434dd10 PM |
1420 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: |
1421 | // Scans the holdout list, attempting to identify a quiescent state | |
1422 | // for each task on the list. If there is a quiescent state, the | |
eea3423b PM |
1423 | // corresponding task is removed from the holdout list. Once this |
1424 | // list is empty, the grace period has completed. | |
a434dd10 | 1425 | // rcu_tasks_trace_postgp(): |
eea3423b | 1426 | // Provides the needed full memory barrier and does debug checks. |
a434dd10 PM |
1427 | // |
1428 | // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. | |
1429 | // | |
eea3423b PM |
1430 | // Pre-grace-period update-side code is ordered before the grace period |
1431 | // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period | |
1432 | // read-side code is ordered before the grace period by atomic operations | |
1433 | // on the .b.need_qs flag of each task involved in this process, or by scheduler | |
1434 | // context-switch ordering (for locked-down non-running readers). | |
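/*
 * Illustrative sketch of the explicit read-side markers mentioned in item 1
 * above, as a consumer outside this file would use them (declarations come
 * from include/linux/rcupdate_trace.h).  The example_obj structure, the
 * global pointer, and the helper functions are hypothetical;
 * rcu_read_lock_trace(), rcu_read_unlock_trace(), rcu_dereference_check(),
 * rcu_read_lock_trace_held(), rcu_assign_pointer(), and
 * call_rcu_tasks_trace() are the real APIs.
 */
#if 0	/* Example only, not built. */
struct example_obj {
	struct rcu_head rh;
	int val;
};

static struct example_obj __rcu *example_ptr;

static int example_reader(void)
{
	int val = 0;
	struct example_obj *p;

	rcu_read_lock_trace();		// Explicit read-side marker.
	p = rcu_dereference_check(example_ptr, rcu_read_lock_trace_held());
	if (p)
		val = p->val;
	rcu_read_unlock_trace();	// Readers may run in idle or entry code.
	return val;
}

static void example_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void example_retire(struct example_obj *old)
{
	rcu_assign_pointer(example_ptr, NULL);		// Unpublish.
	call_rcu_tasks_trace(&old->rh, example_free);	// Free after a trace GP.
}
#endif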
d5f177d3 PM |
1435 | |
1436 | // The lockdep state must be outside of #ifdef to be useful. | |
1437 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
1438 | static struct lock_class_key rcu_lock_trace_key; | |
1439 | struct lockdep_map rcu_trace_lock_map = | |
1440 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); | |
1441 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); | |
1442 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
1443 | ||
1444 | #ifdef CONFIG_TASKS_TRACE_RCU | |
1445 | ||
d5f177d3 PM |
1446 | // Record outstanding IPIs to each CPU. No point in sending two... |
1447 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); | |
1448 | ||
40471509 PM |
1449 | // The number of detections of task quiescent state relying on |
1450 | // heavyweight readers executing explicit memory barriers. | |
6731da9e PM |
1451 | static unsigned long n_heavy_reader_attempts; |
1452 | static unsigned long n_heavy_reader_updates; | |
1453 | static unsigned long n_heavy_reader_ofl_updates; | |
ffcc21a3 | 1454 | static unsigned long n_trc_holdouts; |
40471509 | 1455 | |
b0afa0f0 PM |
1456 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
1457 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, | |
1458 | "RCU Tasks Trace"); | |
1459 | ||
3847b645 PM |
1460 | /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ |
1461 | static u8 rcu_ld_need_qs(struct task_struct *t) | |
1462 | { | |
1463 | smp_mb(); // Enforce full grace-period ordering. | |
1464 | return smp_load_acquire(&t->trc_reader_special.b.need_qs); | |
1465 | } | |
1466 | ||
1467 | /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ | |
1468 | static void rcu_st_need_qs(struct task_struct *t, u8 v) | |
1469 | { | |
1470 | smp_store_release(&t->trc_reader_special.b.need_qs, v); | |
1471 | smp_mb(); // Enforce full grace-period ordering. | |
1472 | } | |
1473 | ||
1474 | /* | |
1475 | * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for | |
1476 | * the four-byte operand-size restriction of some platforms. | |
fc2897d2 | 1477 | * |
3847b645 PM |
1478 | * Returns the old value, which is often ignored. |
1479 | */ | |
1480 | u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) | |
1481 | { | |
1482 | union rcu_special ret; | |
1483 | union rcu_special trs_old = READ_ONCE(t->trc_reader_special); | |
1484 | union rcu_special trs_new = trs_old; | |
1485 | ||
1486 | if (trs_old.b.need_qs != old) | |
1487 | return trs_old.b.need_qs; | |
1488 | trs_new.b.need_qs = new; | |
fc2897d2 PM |
1489 | |
1490 | // Although cmpxchg() appears to KCSAN to update all four bytes, | |
1491 | // only the .b.need_qs byte actually changes. | |
1492 | instrument_atomic_read_write(&t->trc_reader_special.b.need_qs, | |
1493 | sizeof(t->trc_reader_special.b.need_qs)); | |
1494 | // Avoid false-positive KCSAN failures. | |
1495 | ret.s = data_race(cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s)); | |
1496 | ||
3847b645 PM |
1497 | return ret.b.need_qs; |
1498 | } | |
1499 | EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); | |
1500 | ||
eea3423b PM |
1501 | /* |
1502 | * If we are the last reader, signal the grace-period kthread. | |
1503 | * Also remove from the per-CPU list of blocked tasks. | |
1504 | */ | |
a5c071cc | 1505 | void rcu_read_unlock_trace_special(struct task_struct *t) |
d5f177d3 | 1506 | { |
0bcb3868 PM |
1507 | unsigned long flags; |
1508 | struct rcu_tasks_percpu *rtpcp; | |
1509 | union rcu_special trs; | |
1510 | ||
1511 | // Open-coded full-word version of rcu_ld_need_qs(). | |
1512 | smp_mb(); // Enforce full grace-period ordering. | |
1513 | trs = smp_load_acquire(&t->trc_reader_special); | |
276c4104 | 1514 | |
3847b645 | 1515 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) |
276c4104 PM |
1516 | smp_mb(); // Pairs with update-side barriers. |
1517 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. | |
0bcb3868 | 1518 | if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { |
3847b645 PM |
1519 | u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, |
1520 | TRC_NEED_QS_CHECKED); | |
1521 | ||
0bcb3868 PM |
1522 | WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); |
1523 | } | |
1524 | if (trs.b.blocked) { | |
1525 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); | |
1526 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1527 | list_del_init(&t->trc_blkd_node); | |
1528 | WRITE_ONCE(t->trc_reader_special.b.blocked, false); | |
1529 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
3847b645 | 1530 | } |
a5c071cc | 1531 | WRITE_ONCE(t->trc_reader_nesting, 0); |
d5f177d3 PM |
1532 | } |
1533 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); | |
1534 | ||
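/*
 * For context, a simplified paraphrase of the read-side caller: the function
 * above is reached from rcu_read_unlock_trace() only when the outermost
 * unlock observes that the grace-period machinery has marked this reader
 * (a .need_qs request or a recorded blocked state).  This is a sketch, not
 * the real code; the authoritative implementation, including the negative-
 * nesting handoff for irq/NMI handlers, lives in
 * include/linux/rcupdate_trace.h.
 */
#if 0	/* Paraphrased example only, not built. */
static inline void example_read_unlock_trace(void)
{
	struct task_struct *t = current;
	int nesting = READ_ONCE(t->trc_reader_nesting) - 1;

	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);	// Fast path.
		return;
	}
	rcu_read_unlock_trace_special(t);	// Slow path: report QS, dequeue.
}
#endif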
0356d4e6 PM |
1535 | /* Add a newly blocked reader task to its CPU's list. */ |
1536 | void rcu_tasks_trace_qs_blkd(struct task_struct *t) | |
1537 | { | |
1538 | unsigned long flags; | |
1539 | struct rcu_tasks_percpu *rtpcp; | |
1540 | ||
1541 | local_irq_save(flags); | |
1542 | rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); | |
1543 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled | |
1544 | t->trc_blkd_cpu = smp_processor_id(); | |
1545 | if (!rtpcp->rtp_blkd_tasks.next) | |
1546 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); | |
1547 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); | |
0bcb3868 | 1548 | WRITE_ONCE(t->trc_reader_special.b.blocked, true); |
0356d4e6 PM |
1549 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1550 | } | |
1551 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); | |
1552 | ||
d5f177d3 PM |
1553 | /* Add a task to the holdout list, if it is not already on the list. */ |
1554 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) | |
1555 | { | |
1556 | if (list_empty(&t->trc_holdout_list)) { | |
1557 | get_task_struct(t); | |
1558 | list_add(&t->trc_holdout_list, bhp); | |
ffcc21a3 | 1559 | n_trc_holdouts++; |
d5f177d3 PM |
1560 | } |
1561 | } | |
1562 | ||
1563 | /* Remove a task from the holdout list, if it is in fact present. */ | |
1564 | static void trc_del_holdout(struct task_struct *t) | |
1565 | { | |
1566 | if (!list_empty(&t->trc_holdout_list)) { | |
1567 | list_del_init(&t->trc_holdout_list); | |
1568 | put_task_struct(t); | |
ffcc21a3 | 1569 | n_trc_holdouts--; |
d5f177d3 PM |
1570 | } |
1571 | } | |
1572 | ||
1573 | /* IPI handler to check task state. */ | |
1574 | static void trc_read_check_handler(void *t_in) | |
1575 | { | |
9ff86b4c | 1576 | int nesting; |
d5f177d3 PM |
1577 | struct task_struct *t = current; |
1578 | struct task_struct *texp = t_in; | |
1579 | ||
1580 | // If the task is no longer running on this CPU, leave. | |
3847b645 | 1581 | if (unlikely(texp != t)) |
d5f177d3 | 1582 | goto reset_ipi; // Already on holdout list, so will check later. |
d5f177d3 PM |
1583 | |
1584 | // If the task is not in a read-side critical section, and | |
1585 | // if this is the last reader, awaken the grace-period kthread. | |
9ff86b4c PM |
1586 | nesting = READ_ONCE(t->trc_reader_nesting); |
1587 | if (likely(!nesting)) { | |
3847b645 | 1588 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
d5f177d3 PM |
1589 | goto reset_ipi; |
1590 | } | |
ba3a86e4 | 1591 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
9ff86b4c | 1592 | if (unlikely(nesting < 0)) |
ba3a86e4 | 1593 | goto reset_ipi; |
d5f177d3 | 1594 | |
eea3423b PM |
1595 | // Get here if the task is in a read-side critical section. |
1596 | // Set its state so that it will update state for the grace-period | |
1597 | // kthread upon exit from that critical section. | |
55061126 | 1598 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); |
d5f177d3 PM |
1599 | |
1600 | reset_ipi: | |
1601 | // Allow future IPIs to be sent on CPU and for task. | |
1602 | // Also order this IPI handler against any later manipulations of | |
1603 | // the intended task. | |
8211e922 | 1604 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
d5f177d3 PM |
1605 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
1606 | } | |
1607 | ||
1608 | /* Callback function for scheduler to check locked-down task. */ | |
3847b645 | 1609 | static int trc_inspect_reader(struct task_struct *t, void *bhp_in) |
d5f177d3 | 1610 | { |
3847b645 | 1611 | struct list_head *bhp = bhp_in; |
7d0c9c50 | 1612 | int cpu = task_cpu(t); |
18f08e75 | 1613 | int nesting; |
7e3b70e0 | 1614 | bool ofl = cpu_is_offline(cpu); |
7d0c9c50 | 1615 | |
897ba84d | 1616 | if (task_curr(t) && !ofl) { |
7d0c9c50 | 1617 | // If no chance of heavyweight readers, do it the hard way. |
897ba84d | 1618 | if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
9b3c4ab3 | 1619 | return -EINVAL; |
7d0c9c50 PM |
1620 | |
1621 | // If heavyweight readers are enabled on the remote task, | |
1622 | // we can inspect its state even though it is currently running. | |
1623 | // However, we cannot safely change its state. | |
40471509 | 1624 | n_heavy_reader_attempts++; |
897ba84d PM |
1625 | // Check for "running" idle tasks on offline CPUs. |
1626 | if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) | |
9b3c4ab3 | 1627 | return -EINVAL; // No quiescent state, do it the hard way. |
40471509 | 1628 | n_heavy_reader_updates++; |
18f08e75 | 1629 | nesting = 0; |
7d0c9c50 | 1630 | } else { |
bdb0cca0 | 1631 | // The task is not running, so C-language access is safe. |
18f08e75 | 1632 | nesting = t->trc_reader_nesting; |
a80712b9 | 1633 | WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); |
897ba84d PM |
1634 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) |
1635 | n_heavy_reader_ofl_updates++; | |
7d0c9c50 | 1636 | } |
d5f177d3 | 1637 | |
18f08e75 PM |
1638 | // If not exiting a read-side critical section, mark as checked |
1639 | // so that the grace-period kthread will remove it from the | |
1640 | // holdout list. | |
0968e892 PM |
1641 | if (!nesting) { |
1642 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); | |
1643 | return 0; // In QS, so done. | |
3847b645 | 1644 | } |
0968e892 | 1645 | if (nesting < 0) |
eea3423b | 1646 | return -EINVAL; // Reader transitioning, try again later. |
7d0c9c50 PM |
1647 | |
1648 | // The task is in a read-side critical section, so set up its | |
0968e892 PM |
1649 | // state so that it will update state upon exit from that critical |
1650 | // section. | |
55061126 | 1651 | if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) |
3847b645 | 1652 | trc_add_holdout(t, bhp); |
9b3c4ab3 | 1653 | return 0; |
d5f177d3 PM |
1654 | } |
1655 | ||
1656 | /* Attempt to extract the state for the specified task. */ | |
1657 | static void trc_wait_for_one_reader(struct task_struct *t, | |
1658 | struct list_head *bhp) | |
1659 | { | |
1660 | int cpu; | |
1661 | ||
1662 | // If a previous IPI is still in flight, let it complete. | |
1663 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI | |
1664 | return; | |
1665 | ||
1666 | // The current task had better be in a quiescent state. | |
1667 | if (t == current) { | |
3847b645 | 1668 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
bdb0cca0 | 1669 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
d5f177d3 PM |
1670 | return; |
1671 | } | |
1672 | ||
1673 | // Attempt to nail down the task for inspection. | |
1674 | get_task_struct(t); | |
3847b645 | 1675 | if (!task_call_func(t, trc_inspect_reader, bhp)) { |
d5f177d3 PM |
1676 | put_task_struct(t); |
1677 | return; | |
1678 | } | |
1679 | put_task_struct(t); | |
1680 | ||
45f4b4a2 PM |
1681 | // If this task is not yet on the holdout list, then we are in |
1682 | // an RCU read-side critical section. Otherwise, the invocation of | |
d0a85858 | 1683 | // trc_add_holdout() that added it to the list did the necessary |
45f4b4a2 PM |
1684 | // get_task_struct(). Either way, the task cannot be freed out |
1685 | // from under this code. | |
1686 | ||
d5f177d3 PM |
1687 | // If currently running, send an IPI; either way, add it to the list. |
1688 | trc_add_holdout(t, bhp); | |
574de876 PM |
1689 | if (task_curr(t) && |
1690 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { | |
d5f177d3 PM |
1691 | // The task is currently running, so try IPIing it. |
1692 | cpu = task_cpu(t); | |
1693 | ||
1694 | // If there is already an IPI outstanding, let it happen. | |
1695 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) | |
1696 | return; | |
1697 | ||
d5f177d3 PM |
1698 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
1699 | t->trc_ipi_to_cpu = cpu; | |
238dbce3 | 1700 | rcu_tasks_trace.n_ipis++; |
96017bf9 | 1701 | if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { |
d5f177d3 PM |
1702 | // Just in case there is some other reason for |
1703 | // failure than the target CPU being offline. | |
46aa886c NU |
1704 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", |
1705 | __func__, cpu); | |
7e0669c3 | 1706 | rcu_tasks_trace.n_ipis_fails++; |
d5f177d3 | 1707 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
46aa886c | 1708 | t->trc_ipi_to_cpu = -1; |
d5f177d3 PM |
1709 | } |
1710 | } | |
1711 | } | |
1712 | ||
7460ade1 PM |
1713 | /* |
1714 | * Initialize for first-round processing for the specified task. | |
1715 | * Return false if task is NULL or already taken care of, true otherwise. | |
1716 | */ | |
1717 | static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) | |
d5f177d3 | 1718 | { |
1b04fa99 | 1719 | // During early boot when there is only the one boot CPU, there |
19415004 PM |
1720 | // is no idle task for the other CPUs. Also, the grace-period |
1721 | // kthread is always in a quiescent state. In addition, just return | |
1722 | // if this task is already on the list. | |
7460ade1 PM |
1723 | if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) |
1724 | return false; | |
1b04fa99 | 1725 | |
3847b645 | 1726 | rcu_st_need_qs(t, 0); |
d5f177d3 | 1727 | t->trc_ipi_to_cpu = -1; |
7460ade1 PM |
1728 | return true; |
1729 | } | |
1730 | ||
1731 | /* Do first-round processing for the specified task. */ | |
1732 | static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) | |
1733 | { | |
1734 | if (rcu_tasks_trace_pertask_prep(t, true)) | |
1735 | trc_wait_for_one_reader(t, hop); | |
1736 | } | |
1737 | ||
1fa98e2e | 1738 | /* Initialize for a new RCU-tasks-trace grace period. */ |
7460ade1 | 1739 | static void rcu_tasks_trace_pregp_step(struct list_head *hop) |
1fa98e2e | 1740 | { |
dc7d54b4 | 1741 | LIST_HEAD(blkd_tasks); |
1fa98e2e | 1742 | int cpu; |
dc7d54b4 PM |
1743 | unsigned long flags; |
1744 | struct rcu_tasks_percpu *rtpcp; | |
1745 | struct task_struct *t; | |
1fa98e2e PM |
1746 | |
1747 | // There shouldn't be any old IPIs, but... | |
1748 | for_each_possible_cpu(cpu) | |
1749 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); | |
1750 | ||
eea3423b PM |
1751 | // Disable CPU hotplug across the CPU scan for the benefit of |
1752 | // any IPIs that might be needed. This also waits for all readers | |
1753 | // in CPU-hotplug code paths. | |
1fa98e2e | 1754 | cpus_read_lock(); |
7460ade1 | 1755 | |
eea3423b | 1756 | // These rcu_tasks_trace_pertask_prep() calls are serialized to |
7460ade1 | 1757 | // allow safe access to the hop list. |
e386b672 PM |
1758 | for_each_online_cpu(cpu) { |
1759 | rcu_read_lock(); | |
1760 | t = cpu_curr_snapshot(cpu); | |
1761 | if (rcu_tasks_trace_pertask_prep(t, true)) | |
1762 | trc_add_holdout(t, hop); | |
1763 | rcu_read_unlock(); | |
d6ad6063 | 1764 | cond_resched_tasks_rcu_qs(); |
e386b672 | 1765 | } |
dc7d54b4 PM |
1766 | |
1767 | // Only after all running tasks have been accounted for is it | |
1768 | // safe to take care of the tasks that have blocked within their | |
1769 | // current RCU tasks trace read-side critical section. | |
1770 | for_each_possible_cpu(cpu) { | |
1771 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); | |
1772 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1773 | list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks); | |
1774 | while (!list_empty(&blkd_tasks)) { | |
1775 | rcu_read_lock(); | |
1776 | t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); | |
1777 | list_del_init(&t->trc_blkd_node); | |
1778 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); | |
1779 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
1780 | rcu_tasks_trace_pertask(t, hop); | |
1781 | rcu_read_unlock(); | |
1782 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); | |
1783 | } | |
1784 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); | |
d6ad6063 | 1785 | cond_resched_tasks_rcu_qs(); |
dc7d54b4 | 1786 | } |
56096ecd PM |
1787 | |
1788 | // Re-enable CPU hotplug now that the holdout list is populated. | |
1789 | cpus_read_unlock(); | |
1fa98e2e PM |
1790 | } |
1791 | ||
9796e1ae | 1792 | /* |
955a0192 | 1793 | * Do intermediate processing between task and holdout scans. |
9796e1ae PM |
1794 | */ |
1795 | static void rcu_tasks_trace_postscan(struct list_head *hop) | |
d5f177d3 PM |
1796 | { |
1797 | // Wait for late-stage exiting tasks to finish exiting. | |
1798 | // These might have passed the call to exit_tasks_rcu_finish(). | |
e6c86c51 PM |
1799 | |
1800 | // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! | |
d5f177d3 | 1801 | synchronize_rcu(); |
3847b645 PM |
1802 | // Any tasks that exit after this point will set |
1803 | // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. | |
d5f177d3 PM |
1804 | } |
1805 | ||
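/*
 * The synchronize_rcu() above is what allows rcu_trace_implies_rcu_gp() to
 * return true.  Illustrative sketch of how a caller can exploit that: when
 * an object must wait for both an RCU Tasks Trace grace period and a vanilla
 * RCU grace period before being freed, the second wait can be skipped
 * whenever the trace grace period already implies it.  The example_obj type
 * and callbacks are hypothetical; call_rcu(), call_rcu_tasks_trace(),
 * rcu_trace_implies_rcu_gp(), and kfree() are real APIs.
 */
#if 0	/* Example only, not built. */
struct example_obj {
	struct rcu_head rh;
};

static void example_free_rcu(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void example_free_trace_rcu(struct rcu_head *rhp)
{
	if (rcu_trace_implies_rcu_gp())
		example_free_rcu(rhp);		// Trace GP already implied an RCU GP.
	else
		call_rcu(rhp, example_free_rcu);	// Otherwise chain a vanilla GP.
}

static void example_retire(struct example_obj *p)
{
	call_rcu_tasks_trace(&p->rh, example_free_trace_rcu);
}
#endif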
65b629e7 NU |
1806 | /* Communicate task state back to the RCU tasks trace stall warning request. */ |
1807 | struct trc_stall_chk_rdr { | |
1808 | int nesting; | |
1809 | int ipi_to_cpu; | |
1810 | u8 needqs; | |
1811 | }; | |
1812 | ||
1813 | static int trc_check_slow_task(struct task_struct *t, void *arg) | |
1814 | { | |
1815 | struct trc_stall_chk_rdr *trc_rdrp = arg; | |
1816 | ||
f90f19da | 1817 | if (task_curr(t) && cpu_online(task_cpu(t))) |
65b629e7 NU |
1818 | return false; // It is running, so decline to inspect it. |
1819 | trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); | |
1820 | trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); | |
3847b645 | 1821 | trc_rdrp->needqs = rcu_ld_need_qs(t); |
65b629e7 NU |
1822 | return true; |
1823 | } | |
1824 | ||
4593e772 PM |
1825 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
1826 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) | |
1827 | { | |
1828 | int cpu; | |
65b629e7 NU |
1829 | struct trc_stall_chk_rdr trc_rdr; |
1830 | bool is_idle_tsk = is_idle_task(t); | |
4593e772 PM |
1831 | |
1832 | if (*firstreport) { | |
1833 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); | |
1834 | *firstreport = false; | |
1835 | } | |
4593e772 | 1836 | cpu = task_cpu(t); |
65b629e7 | 1837 | if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) |
9f3eb5fb | 1838 | pr_alert("P%d: %c%c\n", |
65b629e7 | 1839 | t->pid, |
9f3eb5fb | 1840 | ".I"[t->trc_ipi_to_cpu >= 0], |
65b629e7 NU |
1841 | ".i"[is_idle_tsk]); |
1842 | else | |
387c0ad7 | 1843 | pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", |
65b629e7 NU |
1844 | t->pid, |
1845 | ".I"[trc_rdr.ipi_to_cpu >= 0], | |
1846 | ".i"[is_idle_tsk], | |
1847 | ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], | |
387c0ad7 | 1848 | ".B"[!!data_race(t->trc_reader_special.b.blocked)], |
65b629e7 | 1849 | trc_rdr.nesting, |
be15a164 PM |
1850 | " !CN"[trc_rdr.needqs & 0x3], |
1851 | " ?"[trc_rdr.needqs > 0x3], | |
c8c03ad9 | 1852 | cpu, cpu_online(cpu) ? "" : "(offline)"); |
4593e772 PM |
1853 | sched_show_task(t); |
1854 | } | |
1855 | ||
1856 | /* List stalled IPIs for RCU tasks trace. */ | |
1857 | static void show_stalled_ipi_trace(void) | |
1858 | { | |
1859 | int cpu; | |
1860 | ||
1861 | for_each_possible_cpu(cpu) | |
1862 | if (per_cpu(trc_ipi_to_cpu, cpu)) | |
1863 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); | |
1864 | } | |
1865 | ||
d5f177d3 PM |
1866 | /* Do one scan of the holdout list. */ |
1867 | static void check_all_holdout_tasks_trace(struct list_head *hop, | |
4593e772 | 1868 | bool needreport, bool *firstreport) |
d5f177d3 PM |
1869 | { |
1870 | struct task_struct *g, *t; | |
1871 | ||
eea3423b | 1872 | // Disable CPU hotplug across the holdout list scan for IPIs. |
81b4a7bc PM |
1873 | cpus_read_lock(); |
1874 | ||
d5f177d3 PM |
1875 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
1876 | // If safe and needed, try to check the current task. | |
1877 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && | |
3847b645 | 1878 | !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) |
d5f177d3 PM |
1879 | trc_wait_for_one_reader(t, hop); |
1880 | ||
1881 | // If check succeeded, remove this task from the list. | |
f5dbc594 | 1882 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
3847b645 | 1883 | rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) |
d5f177d3 | 1884 | trc_del_holdout(t); |
4593e772 PM |
1885 | else if (needreport) |
1886 | show_stalled_task_trace(t, firstreport); | |
d6ad6063 | 1887 | cond_resched_tasks_rcu_qs(); |
4593e772 | 1888 | } |
81b4a7bc PM |
1889 | |
1890 | // Re-enable CPU hotplug now that the holdout list scan has completed. | |
1891 | cpus_read_unlock(); | |
1892 | ||
4593e772 | 1893 | if (needreport) { |
89401176 | 1894 | if (*firstreport) |
4593e772 PM |
1895 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); |
1896 | show_stalled_ipi_trace(); | |
d5f177d3 PM |
1897 | } |
1898 | } | |
1899 | ||
cbe0d8d9 PM |
1900 | static void rcu_tasks_trace_empty_fn(void *unused) |
1901 | { | |
1902 | } | |
1903 | ||
d5f177d3 | 1904 | /* Wait for grace period to complete and provide ordering. */ |
af051ca4 | 1905 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
d5f177d3 | 1906 | { |
cbe0d8d9 | 1907 | int cpu; |
4593e772 | 1908 | |
cbe0d8d9 PM |
1909 | // Wait for any lingering IPI handlers to complete. Note that |
1910 | // if a CPU has gone offline or transitioned to userspace in the | |
1911 | // meantime, all IPI handlers should have been drained beforehand. | |
1912 | // Yes, this assumes that CPUs process IPIs in order. If that ever | |
1913 | // changes, there will need to be a recheck and/or timed wait. | |
1914 | for_each_online_cpu(cpu) | |
f5dbc594 | 1915 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
cbe0d8d9 PM |
1916 | smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); |
1917 | ||
d5f177d3 | 1918 | smp_mb(); // Caller's code must be ordered after wakeup. |
43766c3e | 1919 | // Pairs with pretty much every ordering primitive. |
d5f177d3 PM |
1920 | } |
1921 | ||
1922 | /* Report any needed quiescent state for this exiting task. */ | |
25246fc8 | 1923 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
d5f177d3 | 1924 | { |
0356d4e6 PM |
1925 | union rcu_special trs = READ_ONCE(t->trc_reader_special); |
1926 | ||
3847b645 | 1927 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
bdb0cca0 | 1928 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
0bcb3868 | 1929 | if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) |
a5c071cc | 1930 | rcu_read_unlock_trace_special(t); |
3847b645 PM |
1931 | else |
1932 | WRITE_ONCE(t->trc_reader_nesting, 0); | |
d5f177d3 PM |
1933 | } |
1934 | ||
d5f177d3 PM |
1935 | /** |
1936 | * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period | |
1937 | * @rhp: structure to be used for queueing the RCU updates. | |
1938 | * @func: actual callback function to be invoked after the grace period | |
1939 | * | |
ed42c380 NU |
1940 | * The callback function will be invoked some time after a trace rcu-tasks |
1941 | * grace period elapses, in other words after all currently executing | |
1942 | * trace rcu-tasks read-side critical sections have completed. These | |
1943 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() | |
1944 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1945 | * |
1946 | * See the description of call_rcu() for more detailed information on | |
1947 | * memory ordering guarantees. | |
1948 | */ | |
1949 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) | |
1950 | { | |
1951 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace); | |
1952 | } | |
1953 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); | |
1954 | ||
1955 | /** | |
1956 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period | |
1957 | * | |
1958 | * Control will return to the caller some time after a trace rcu-tasks | |
c7dcf810 | 1959 | * grace period has elapsed, in other words after all currently executing |
ed42c380 | 1960 | * trace rcu-tasks read-side critical sections have completed. These read-side |
c7dcf810 PM |
1961 | * critical sections are delimited by calls to rcu_read_lock_trace() |
1962 | * and rcu_read_unlock_trace(). | |
d5f177d3 PM |
1963 | * |
1964 | * This is a very specialized primitive, intended only for a few uses in | |
1965 | * tracing and other situations requiring manipulation of function preambles | |
1966 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not | |
1967 | * (yet) intended for heavy use from multiple CPUs. | |
1968 | * | |
1969 | * See the description of synchronize_rcu() for more detailed information | |
1970 | * on memory ordering guarantees. | |
1971 | */ | |
1972 | void synchronize_rcu_tasks_trace(void) | |
1973 | { | |
1974 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); | |
1975 | synchronize_rcu_tasks_generic(&rcu_tasks_trace); | |
1976 | } | |
1977 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); | |
1978 | ||
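/*
 * Illustrative sketch of the synchronous counterpart to the
 * call_rcu_tasks_trace() example earlier in this file, reusing the
 * hypothetical example_obj/example_ptr names from that sketch: unpublish
 * the old object, wait for a full trace grace period, then reclaim it
 * directly.  DEFINE_MUTEX(), rcu_replace_pointer(), lockdep_is_held(),
 * synchronize_rcu_tasks_trace(), and kfree() are real APIs.
 */
#if 0	/* Example only, not built. */
static DEFINE_MUTEX(example_lock);

static void example_update(struct example_obj *newp)
{
	struct example_obj *oldp;

	mutex_lock(&example_lock);
	oldp = rcu_replace_pointer(example_ptr, newp,
				   lockdep_is_held(&example_lock));
	mutex_unlock(&example_lock);

	synchronize_rcu_tasks_trace();	// All pre-existing trace readers done.
	kfree(oldp);			// Nothing can still reference oldp.
}
#endif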
1979 | /** | |
1980 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. | |
1981 | * | |
1982 | * Although the current implementation is guaranteed to wait, it is not | |
1983 | * obligated to do so, for example, when there are no pending callbacks. | |
1984 | */ | |
1985 | void rcu_barrier_tasks_trace(void) | |
1986 | { | |
ce9b1c66 | 1987 | rcu_barrier_tasks_generic(&rcu_tasks_trace); |
d5f177d3 PM |
1988 | } |
1989 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); | |
1990 | ||
450d461a PM |
1991 | int rcu_tasks_trace_lazy_ms = -1; |
1992 | module_param(rcu_tasks_trace_lazy_ms, int, 0444); | |
1993 | ||
d5f177d3 PM |
1994 | static int __init rcu_spawn_tasks_trace_kthread(void) |
1995 | { | |
2393a613 | 1996 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
4fe192df | 1997 | rcu_tasks_trace.gp_sleep = HZ / 10; |
75dc2da5 | 1998 | rcu_tasks_trace.init_fract = HZ / 10; |
2393a613 | 1999 | } else { |
4fe192df PM |
2000 | rcu_tasks_trace.gp_sleep = HZ / 200; |
2001 | if (rcu_tasks_trace.gp_sleep <= 0) | |
2002 | rcu_tasks_trace.gp_sleep = 1; | |
75dc2da5 | 2003 | rcu_tasks_trace.init_fract = HZ / 200; |
2393a613 PM |
2004 | if (rcu_tasks_trace.init_fract <= 0) |
2005 | rcu_tasks_trace.init_fract = 1; | |
2006 | } | |
450d461a PM |
2007 | if (rcu_tasks_trace_lazy_ms >= 0) |
2008 | rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); | |
d5f177d3 | 2009 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
d5f177d3 PM |
2010 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; |
2011 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; | |
2012 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; | |
2013 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); | |
2014 | return 0; | |
2015 | } | |
d5f177d3 | 2016 | |
27c0f144 PM |
2017 | #if !defined(CONFIG_TINY_RCU) |
2018 | void show_rcu_tasks_trace_gp_kthread(void) | |
e21408ce | 2019 | { |
40471509 | 2020 | char buf[64]; |
e21408ce | 2021 | |
cc5645fd | 2022 | snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu", |
ffcc21a3 | 2023 | data_race(n_trc_holdouts), |
edf3775f | 2024 | data_race(n_heavy_reader_ofl_updates), |
40471509 PM |
2025 | data_race(n_heavy_reader_updates), |
2026 | data_race(n_heavy_reader_attempts)); | |
e21408ce PM |
2027 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf); |
2028 | } | |
27c0f144 PM |
2029 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
2030 | #endif // !defined(CONFIG_TINY_RCU) | |
e21408ce | 2031 | |
5f8e3202 PM |
2032 | struct task_struct *get_rcu_tasks_trace_gp_kthread(void) |
2033 | { | |
2034 | return rcu_tasks_trace.kthread_ptr; | |
2035 | } | |
2036 | EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); | |
2037 | ||
dddcddef Z |
2038 | void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) |
2039 | { | |
2040 | *flags = 0; | |
2041 | *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq); | |
2042 | } | |
2043 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); | |
2044 | ||
d5f177d3 | 2045 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
25246fc8 | 2046 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
d5f177d3 | 2047 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
8fd8ca38 | 2048 | |
8344496e | 2049 | #ifndef CONFIG_TINY_RCU |
e21408ce PM |
2050 | void show_rcu_tasks_gp_kthreads(void) |
2051 | { | |
2052 | show_rcu_tasks_classic_gp_kthread(); | |
2053 | show_rcu_tasks_rude_gp_kthread(); | |
2054 | show_rcu_tasks_trace_gp_kthread(); | |
2055 | } | |
8344496e | 2056 | #endif /* #ifndef CONFIG_TINY_RCU */ |
e21408ce | 2057 | |
bfba7ed0 URS |
2058 | #ifdef CONFIG_PROVE_RCU |
2059 | struct rcu_tasks_test_desc { | |
2060 | struct rcu_head rh; | |
2061 | const char *name; | |
2062 | bool notrun; | |
1cf1144e | 2063 | unsigned long runstart; |
bfba7ed0 URS |
2064 | }; |
2065 | ||
2066 | static struct rcu_tasks_test_desc tests[] = { | |
2067 | { | |
2068 | .name = "call_rcu_tasks()", | |
2069 | /* If not defined, the test is skipped. */ | |
1cf1144e | 2070 | .notrun = IS_ENABLED(CONFIG_TASKS_RCU), |
bfba7ed0 URS |
2071 | }, |
2072 | { | |
2073 | .name = "call_rcu_tasks_rude()", | |
2074 | /* If not defined, the test is skipped. */ | |
1cf1144e | 2075 | .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU), |
bfba7ed0 URS |
2076 | }, |
2077 | { | |
2078 | .name = "call_rcu_tasks_trace()", | |
2079 | /* If not defined, the test is skipped. */ | |
1cf1144e | 2080 | .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) |
bfba7ed0 URS |
2081 | } |
2082 | }; | |
2083 | ||
2084 | static void test_rcu_tasks_callback(struct rcu_head *rhp) | |
2085 | { | |
2086 | struct rcu_tasks_test_desc *rttd = | |
2087 | container_of(rhp, struct rcu_tasks_test_desc, rh); | |
2088 | ||
2089 | pr_info("Callback from %s invoked.\n", rttd->name); | |
2090 | ||
1cf1144e | 2091 | rttd->notrun = false; |
bfba7ed0 URS |
2092 | } |
2093 | ||
2094 | static void rcu_tasks_initiate_self_tests(void) | |
2095 | { | |
bfba7ed0 | 2096 | #ifdef CONFIG_TASKS_RCU |
92a708dc | 2097 | pr_info("Running RCU Tasks wait API self tests\n"); |
9420fb93 | 2098 | tests[0].runstart = jiffies; |
bfba7ed0 URS |
2099 | synchronize_rcu_tasks(); |
2100 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); | |
2101 | #endif | |
2102 | ||
2103 | #ifdef CONFIG_TASKS_RUDE_RCU | |
92a708dc | 2104 | pr_info("Running RCU Tasks Rude wait API self tests\n"); |
9420fb93 | 2105 | tests[1].runstart = jiffies; |
bfba7ed0 URS |
2106 | synchronize_rcu_tasks_rude(); |
2107 | call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); | |
2108 | #endif | |
2109 | ||
2110 | #ifdef CONFIG_TASKS_TRACE_RCU | |
92a708dc | 2111 | pr_info("Running RCU Tasks Trace wait API self tests\n"); |
9420fb93 | 2112 | tests[2].runstart = jiffies; |
bfba7ed0 URS |
2113 | synchronize_rcu_tasks_trace(); |
2114 | call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); | |
2115 | #endif | |
2116 | } | |
2117 | ||
e72ee5e1 WL |
2118 | /* |
2119 | * Return: 0 - test passed | |
2120 | * 1 - test failed, but has not timed out yet | |
2121 | * -1 - test failed and timed out | |
2122 | */ | |
bfba7ed0 URS |
2123 | static int rcu_tasks_verify_self_tests(void) |
2124 | { | |
2125 | int ret = 0; | |
2126 | int i; | |
1cf1144e | 2127 | unsigned long bst = rcu_task_stall_timeout; |
bfba7ed0 | 2128 | |
1cf1144e PM |
2129 | if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) |
2130 | bst = RCU_TASK_BOOT_STALL_TIMEOUT; | |
bfba7ed0 | 2131 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
1cf1144e PM |
2132 | while (tests[i].notrun) { // still hanging. |
2133 | if (time_after(jiffies, tests[i].runstart + bst)) { | |
2134 | pr_err("%s has failed boot-time tests.\n", tests[i].name); | |
2135 | ret = -1; | |
2136 | break; | |
2137 | } | |
e72ee5e1 WL |
2138 | ret = 1; |
2139 | break; | |
bfba7ed0 URS |
2140 | } |
2141 | } | |
e72ee5e1 | 2142 | WARN_ON(ret < 0); |
bfba7ed0 URS |
2143 | |
2144 | return ret; | |
2145 | } | |
e72ee5e1 WL |
2146 | |
2147 | /* | |
2148 | * Repeat the rcu_tasks_verify_self_tests() call once every second until the | |
2149 | * test passes or has timed out. | |
2150 | */ | |
2151 | static struct delayed_work rcu_tasks_verify_work; | |
2152 | static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) | |
2153 | { | |
2154 | int ret = rcu_tasks_verify_self_tests(); | |
2155 | ||
2156 | if (ret <= 0) | |
2157 | return; | |
2158 | ||
2159 | /* Test fails but not timed out yet, reschedule another check */ | |
2160 | schedule_delayed_work(&rcu_tasks_verify_work, HZ); | |
2161 | } | |
2162 | ||
2163 | static int rcu_tasks_verify_schedule_work(void) | |
2164 | { | |
2165 | INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); | |
2166 | rcu_tasks_verify_work_fn(NULL); | |
2167 | return 0; | |
2168 | } | |
2169 | late_initcall(rcu_tasks_verify_schedule_work); | |
bfba7ed0 URS |
2170 | #else /* #ifdef CONFIG_PROVE_RCU */ |
2171 | static void rcu_tasks_initiate_self_tests(void) { } | |
2172 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | |
2173 | ||
30ef0963 PM |
2174 | void __init tasks_cblist_init_generic(void) |
2175 | { | |
2176 | lockdep_assert_irqs_disabled(); | |
2177 | WARN_ON(num_online_cpus() > 1); | |
2178 | ||
2179 | #ifdef CONFIG_TASKS_RCU | |
2180 | cblist_init_generic(&rcu_tasks); | |
2181 | #endif | |
2182 | ||
2183 | #ifdef CONFIG_TASKS_RUDE_RCU | |
2184 | cblist_init_generic(&rcu_tasks_rude); | |
2185 | #endif | |
2186 | ||
2187 | #ifdef CONFIG_TASKS_TRACE_RCU | |
2188 | cblist_init_generic(&rcu_tasks_trace); | |
2189 | #endif | |
2190 | } | |
2191 | ||
1b04fa99 URS |
2192 | void __init rcu_init_tasks_generic(void) |
2193 | { | |
2194 | #ifdef CONFIG_TASKS_RCU | |
2195 | rcu_spawn_tasks_kthread(); | |
2196 | #endif | |
2197 | ||
2198 | #ifdef CONFIG_TASKS_RUDE_RCU | |
2199 | rcu_spawn_tasks_rude_kthread(); | |
2200 | #endif | |
2201 | ||
2202 | #ifdef CONFIG_TASKS_TRACE_RCU | |
2203 | rcu_spawn_tasks_trace_kthread(); | |
2204 | #endif | |
bfba7ed0 URS |
2205 | |
2206 | // Run the self-tests. | |
2207 | rcu_tasks_initiate_self_tests(); | |
1b04fa99 URS |
2208 | } |
2209 | ||
8fd8ca38 PM |
2210 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
2211 | static inline void rcu_tasks_bootup_oddness(void) {} | |
2212 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |