// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)
/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
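
/*
 * Worked example (illustrative, not part of the original source): with the
 * default CONFIG_RCU_CPU_STALL_TIMEOUT of 21 seconds, HZ=1000, and
 * CONFIG_PROVE_RCU=n, nothing is clamped and the function above returns
 * 21 * 1000 + 0 = 21000 jiffies.  A boot-time rcupdate.rcu_cpu_stall_timeout=2
 * would instead be clamped up to 3 seconds, yielding 3000 jiffies.
 */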
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
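
/*
 * Usage sketch (illustrative only; "p" and its "rh" rcu_head field are
 * hypothetical): a caller that would normally defer a free behind a grace
 * period, but wants to avoid queueing more work behind a stalled one,
 * might do something like:
 *
 *	if (rcu_gp_might_be_stalled()) {
 *		synchronize_rcu();	// Don't pile up deferred work...
 *		kfree(p);		// ...free synchronously instead.
 *	} else {
 *		kfree_rcu(p, rh);	// Grace periods look healthy; defer.
 *	}
 */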
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);
/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
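
/*
 * Example (illustrative): these sysctls are normally set from userspace,
 * for instance to panic on the third stall detected after boot:
 *
 *	# echo 3 > /proc/sys/kernel/max_rcu_stall_to_panic
 *	# echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 *
 * The exact /proc/sys paths are assumptions based on the usual kernel.*
 * naming; see the sysctl registration for the authoritative names.
 */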
/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}
/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}
/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU
/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};
/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};
/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}
/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       falsepositive ? " (false positive?)" : "");
}
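
/*
 * Sketch of the line printed above, field by field (values invented):
 *
 *	3-...!: (0 ticks this GP) idle=5ac/1/0x4000000000000000 softirq=25/25 fqs=0
 *
 * That is: CPU number, four single-character online/qsmask/irq_work state
 * flags, the tick or GPs-behind count, the dynticks snapshot and nesting
 * counters, softirq counts at grace-period start and now, the number of
 * force-quiescent-state scans since the grace period began, and possibly
 * a "(false positive?)" tag.  Documentation/RCU/stallwarn.rst decodes the
 * individual flag characters.
 */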
/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}
/* Complain about missing wakeups from expired fqs wait timer */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}
static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}
static void check_cpu_stall(struct rcu_data *rdp)
{
	bool didstall = false;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;
	}
	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
		WRITE_ONCE(rcu_state.jiffies_stall, jn);
	}
}
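
/*
 * Rough timeline implied by the code above (a sketch, not normative):
 *
 *	gp_start                 gp_start + stall timeout
 *	|------------------------|----|---------------------------------->
 *	                         |    `- + RCU_STALL_RAT_DELAY: some other
 *	                         |       CPU wins the cmpxchg() and calls
 *	                         |       print_other_cpu_stall().
 *	                         `- a CPU that is itself blocking the grace
 *	                            period wins the cmpxchg() and calls
 *	                            print_cpu_stall().
 *
 * Either way, the cmpxchg() on ->jiffies_stall ensures that only one CPU
 * reports a given stall, and the trailing WRITE_ONCE() re-arms the timeout
 * so that a still-stalled grace period complains again roughly three
 * stall-timeout intervals later.
 */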
//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.

/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
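
/*
 * Usage sketch (hypothetical caller; "some_gp_state" stands for whatever
 * grace-period snapshot the caller tracks): a boost-failure diagnostic
 * might first do the cheap lockless check, then acquire the locks only
 * to identify the holdout CPU:
 *
 *	int cpu;
 *
 *	if (!rcu_check_boost_fail(some_gp_state, NULL))		// quick check
 *		if (!rcu_check_boost_fail(some_gp_state, &cpu) && cpu >= 0)
 *			pr_info("Holdout CPU: %d\n", cpu);
 */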
/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
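
/*
 * Usage sketch (parameter and sysrq paths are assumptions based on the code
 * above, not verified here): booting with the sysrq_rcu module parameter set
 * (e.g. rcutree.sysrq_rcu=1) registers the 'y' key, after which the RCU tree
 * can be dumped on demand, sysrq permissions permitting:
 *
 *	# echo y > /proc/sysrq-trigger
 */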