// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

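/*
 * Grace periods can take longer under CONFIG_PROVE_RCU's debug checking,
 * so give the stall-warning machinery some extra slack in that case.
 */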
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

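/* Re-enable stall warnings, but only if sysrq was what suppressed them. */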
void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

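/* Register the above panic handler at early boot. */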
static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rcu_state.gp_start = j;
	j1 = rcu_jiffies_till_stall_check();
	/* Record ->gp_start before ->jiffies_stall. */
	smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		".l"[rdp->all_lazy],
		".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
		".D"[!!rdp->tick_nohz_enabled_snap]);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
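	/*
	 * In the message below, "O" means that the CPU is currently
	 * offline, "o" that it was offline when the grace period started
	 * (absent from ->qsmaskinit), and "N" that it will be offline for
	 * the next grace period (absent from ->qsmaskinitnext), with "."
	 * meaning online in each position.  The next character is "?" if
	 * CONFIG_IRQ_WORK=n, a digit giving the number of grace periods
	 * that the stall-detection irq_work has been pending, "." if that
	 * irq_work is idle and up to date, and "!" otherwise.
	 */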
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz);
}

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	j = jiffies - READ_ONCE(rcu_state.gp_activity);
	if (j > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       READ_ONCE(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}

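/*
 * Print a stall warning on behalf of CPUs and tasks other than the one
 * detecting the stall.
 */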
static void print_other_cpu_stall(unsigned long gp_seq)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       READ_ONCE(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

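/* Print a stall warning when the detecting CPU is itself the one stalling. */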
static void print_cpu_stall(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - rcu_state.gp_start,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

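/*
 * Check for a stall in the current grace period, taking care to reject
 * false positives, and complain via print_cpu_stall() or
 * print_other_cpu_stall(), as appropriate.
 */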
static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall();

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	j = jiffies;
	ja = j - READ_ONCE(rcu_state.gp_activity);
	jr = j - READ_ONCE(rcu_state.gp_req_activity);
	jw = j - READ_ONCE(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state,
		rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
		(long)READ_ONCE(rcu_state.gp_seq),
		(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
		READ_ONCE(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
			(long)rnp->gp_seq_needed);
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (rdp->gpwrap ||
			    ULONG_CMP_GE(rcu_state.gp_seq,
					 rdp->gp_seq_needed))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)rdp->gp_seq_needed);
		}
	}
	/* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp) /* Avoid double unlock on single-node trees. */
			raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

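/* Register the "y" sysrq key at early boot, if so requested via sysrq_rcu. */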
static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);