Merge tag 'pci-v6.16-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci
[linux-2.6-block.git] / kernel / hung_task.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
e162b39a
MSB
2/*
3 * Detect Hung Task
4 *
5 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
6 *
7 */
8
9#include <linux/mm.h>
10#include <linux/cpu.h>
11#include <linux/nmi.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/freezer.h>
15#include <linux/kthread.h>
16#include <linux/lockdep.h>
9984de1a 17#include <linux/export.h>
f39650de 18#include <linux/panic_notifier.h>
e162b39a 19#include <linux/sysctl.h>
a1c6ca3c 20#include <linux/suspend.h>
41e85ce8 21#include <linux/utsname.h>
3f07c014 22#include <linux/sched/signal.h>
b17b0153 23#include <linux/sched/debug.h>
a98eb6f1 24#include <linux/sched/sysctl.h>
e711faaa 25#include <linux/hung_task.h>
3f07c014 26
6a716c90 27#include <trace/events/sched.h>
e162b39a
MSB
28
29/*
ce9dbe24 30 * The number of tasks checked:
e162b39a 31 */
882c5b26 32static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
ce9dbe24 33
03ecb24d
LY
34/*
35 * Total number of tasks detected as hung since boot:
36 */
37static unsigned long __read_mostly sysctl_hung_task_detect_count;
38
ce9dbe24
MSB
39/*
40 * Limit number of tasks checked in a batch.
41 *
42 * This value controls the preemptibility of khungtaskd since preemption
43 * is disabled during the critical section. It also controls the size of
44 * the RCU grace period. So it needs to be upper-bound.
45 */
304ae427 46#define HUNG_TASK_LOCK_BREAK (HZ / 10)
e162b39a
MSB
47
48/*
49 * Zero means infinite timeout - no checking done:
50 */
e11feaa1 51unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
5c3273ec 52EXPORT_SYMBOL_GPL(sysctl_hung_task_timeout_secs);
e162b39a 53
a2e51445
DV
54/*
55 * Zero (default value) means use sysctl_hung_task_timeout_secs:
56 */
882c5b26 57static unsigned long __read_mostly sysctl_hung_task_check_interval_secs;
a2e51445 58
882c5b26 59static int __read_mostly sysctl_hung_task_warnings = 10;
e162b39a
MSB
60
61static int __read_mostly did_panic;
780cbcf2 62static bool hung_task_show_lock;
401c636a 63static bool hung_task_call_panic;
0ec9dc9b 64static bool hung_task_show_all_bt;
e162b39a
MSB
65
66static struct task_struct *watchdog_task;
67
0ec9dc9b
GP
68#ifdef CONFIG_SMP
69/*
70 * Should we dump all CPUs backtraces in a hung task event?
71 * Defaults to 0, can be changed via sysctl.
72 */
bbe7a10e
XN
73static unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace;
74#else
75#define sysctl_hung_task_all_cpu_backtrace 0
0ec9dc9b
GP
76#endif /* CONFIG_SMP */
77
e162b39a
MSB
78/*
79 * Should we panic (and reboot, if panic_timeout= is set) when a
80 * hung task is detected:
81 */
882c5b26
TR
82static unsigned int __read_mostly sysctl_hung_task_panic =
83 IS_ENABLED(CONFIG_BOOTPARAM_HUNG_TASK_PANIC);
e162b39a 84
e162b39a
MSB
85static int
86hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
87{
88 did_panic = 1;
89
90 return NOTIFY_DONE;
91}
92
93static struct notifier_block panic_block = {
94 .notifier_call = hung_task_panic,
95};
96

#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/*
 * Report which task most likely blocks @task, based on the lock address
 * and type encoded in task->blocker (mutex owner or last semaphore
 * holder), and dump that task's stack. Must run under rcu_read_lock().
 */
static void debug_show_blocker(struct task_struct *task)
{
	unsigned long owner_addr, blocker, type;
	struct task_struct *grp, *thr;
	void *lock;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "No rcu lock held");

	blocker = READ_ONCE(task->blocker);
	if (!blocker)
		return;

	type = hung_task_get_blocker_type(blocker);
	lock = hung_task_blocker_to_lock(blocker);

	switch (type) {
	case BLOCKER_TYPE_MUTEX:
		owner_addr = mutex_get_owner((struct mutex *)lock);
		break;
	case BLOCKER_TYPE_SEM:
		owner_addr = sem_last_holder((struct semaphore *)lock);
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (unlikely(!owner_addr)) {
		/* The lock is contended but nobody is recorded as holding it. */
		switch (type) {
		case BLOCKER_TYPE_MUTEX:
			pr_err("INFO: task %s:%d is blocked on a mutex, but the owner is not found.\n",
			       task->comm, task->pid);
			break;
		case BLOCKER_TYPE_SEM:
			pr_err("INFO: task %s:%d is blocked on a semaphore, but the last holder is not found.\n",
			       task->comm, task->pid);
			break;
		}
		return;
	}

	/* Ensure the owner information is correct. */
	for_each_process_thread(grp, thr) {
		if ((unsigned long)thr != owner_addr)
			continue;

		switch (type) {
		case BLOCKER_TYPE_MUTEX:
			pr_err("INFO: task %s:%d is blocked on a mutex likely owned by task %s:%d.\n",
			       task->comm, task->pid, thr->comm, thr->pid);
			break;
		case BLOCKER_TYPE_SEM:
			pr_err("INFO: task %s:%d blocked on a semaphore likely last held by task %s:%d\n",
			       task->comm, task->pid, thr->comm, thr->pid);
			break;
		}
		sched_show_task(thr);
		return;
	}
}
#else
static inline void debug_show_blocker(struct task_struct *task)
{
}
#endif
165
17406b82 166static void check_hung_task(struct task_struct *t, unsigned long timeout)
e162b39a
MSB
167{
168 unsigned long switch_count = t->nvcsw + t->nivcsw;
169
cf2592f5
FW
170 /*
171 * Ensure the task is not frozen.
f9fab10b 172 * Also, skip vfork and any other user process that freezer should skip.
cf2592f5 173 */
fdf756f7 174 if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))
f5d39b02 175 return;
f9fab10b
MSB
176
177 /*
178 * When a freshly created task is scheduled once, changes its state to
179 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
180 * musn't be checked.
181 */
182 if (unlikely(!switch_count))
e162b39a
MSB
183 return;
184
17406b82 185 if (switch_count != t->last_switch_count) {
e162b39a 186 t->last_switch_count = switch_count;
a2e51445 187 t->last_switch_time = jiffies;
e162b39a
MSB
188 return;
189 }
a2e51445
DV
190 if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
191 return;
6a716c90 192
03ecb24d
LY
193 /*
194 * This counter tracks the total number of tasks detected as hung
195 * since boot.
196 */
197 sysctl_hung_task_detect_count++;
198
6a716c90
ON
199 trace_sched_process_hang(t);
200
168e06f7
LC
201 if (sysctl_hung_task_panic) {
202 console_verbose();
203 hung_task_show_lock = true;
204 hung_task_call_panic = true;
205 }
270750db 206
e162b39a
MSB
207 /*
208 * Ok, the task did not get scheduled for more than 2 minutes,
209 * complain:
210 */
abd8ac05 211 if (sysctl_hung_task_warnings || hung_task_call_panic) {
4ca5ede0
TH
212 if (sysctl_hung_task_warnings > 0)
213 sysctl_hung_task_warnings--;
48a6d64e 214 pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
b014beba 215 t->comm, t->pid, (jiffies - t->last_switch_time) / HZ);
48a6d64e
JS
216 pr_err(" %s %s %.*s\n",
217 print_tainted(), init_utsname()->release,
218 (int)strcspn(init_utsname()->version, " "),
219 init_utsname()->version);
65ef17aa
OK
220 if (t->flags & PF_POSTCOREDUMP)
221 pr_err(" Blocked by coredump.\n");
48a6d64e
JS
222 pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
223 " disables this message.\n");
224 sched_show_task(t);
3cf67d61 225 debug_show_blocker(t);
780cbcf2 226 hung_task_show_lock = true;
0ec9dc9b
GP
227
228 if (sysctl_hung_task_all_cpu_backtrace)
229 hung_task_show_all_bt = true;
b1f712b3 230 if (!sysctl_hung_task_warnings)
231 pr_info("Future hung task reports are suppressed, see sysctl kernel.hung_task_warnings\n");
48a6d64e 232 }
e162b39a 233
e162b39a 234 touch_nmi_watchdog();
e162b39a
MSB
235}
236
ce9dbe24
MSB
237/*
238 * To avoid extending the RCU grace period for an unbounded amount of time,
239 * periodically exit the critical section and enter a new one.
240 *
241 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
6a103b0d 242 * to exit the grace period. For classic RCU, a reschedule is required.
ce9dbe24 243 */
6027ce49 244static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
ce9dbe24 245{
6027ce49
ON
246 bool can_cont;
247
ce9dbe24
MSB
248 get_task_struct(g);
249 get_task_struct(t);
250 rcu_read_unlock();
251 cond_resched();
252 rcu_read_lock();
6027ce49 253 can_cont = pid_alive(g) && pid_alive(t);
ce9dbe24
MSB
254 put_task_struct(t);
255 put_task_struct(g);
6027ce49
ON
256
257 return can_cont;
ce9dbe24
MSB
258}
259
e162b39a
MSB
260/*
261 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
262 * a really long time (120 seconds). If that happens, print out
263 * a warning.
264 */
603a148f 265static void check_hung_uninterruptible_tasks(unsigned long timeout)
e162b39a
MSB
266{
267 int max_count = sysctl_hung_task_check_count;
304ae427 268 unsigned long last_break = jiffies;
e162b39a
MSB
269 struct task_struct *g, *t;
270
271 /*
272 * If the system crashed already then all bets are off,
273 * do not report extra hung tasks:
274 */
275 if (test_taint(TAINT_DIE) || did_panic)
276 return;
277
780cbcf2 278 hung_task_show_lock = false;
94be52dc 279 rcu_read_lock();
972fae69 280 for_each_process_thread(g, t) {
5aec788a
PZ
281 unsigned int state;
282
e5af0226 283 if (!max_count--)
e162b39a 284 goto unlock;
304ae427 285 if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
6027ce49 286 if (!rcu_lock_break(g, t))
ce9dbe24 287 goto unlock;
304ae427 288 last_break = jiffies;
ce9dbe24 289 }
fdf756f7
PZ
290 /*
291 * skip the TASK_KILLABLE tasks -- these can be killed
292 * skip the TASK_IDLE tasks -- those are genuinely idle
293 */
5aec788a
PZ
294 state = READ_ONCE(t->__state);
295 if ((state & TASK_UNINTERRUPTIBLE) &&
fdf756f7
PZ
296 !(state & TASK_WAKEKILL) &&
297 !(state & TASK_NOLOAD))
17406b82 298 check_hung_task(t, timeout);
972fae69 299 }
e162b39a 300 unlock:
94be52dc 301 rcu_read_unlock();
07a22b61 302 if (hung_task_show_lock)
780cbcf2 303 debug_show_all_locks();
0ec9dc9b
GP
304
305 if (hung_task_show_all_bt) {
306 hung_task_show_all_bt = false;
401c636a 307 trigger_all_cpu_backtrace();
401c636a 308 }
0ec9dc9b
GP
309
310 if (hung_task_call_panic)
311 panic("hung_task: blocked tasks");
e162b39a
MSB
312}
313
b4aa14a6
TH
314static long hung_timeout_jiffies(unsigned long last_checked,
315 unsigned long timeout)
e162b39a
MSB
316{
317 /* timeout of 0 will disable the watchdog */
b4aa14a6
TH
318 return timeout ? last_checked - jiffies + timeout * HZ :
319 MAX_SCHEDULE_TIMEOUT;
e162b39a
MSB
320}
321
bbe7a10e 322#ifdef CONFIG_SYSCTL
e162b39a
MSB
323/*
324 * Process updating of timeout sysctl
325 */
78eb4ea2 326static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int write,
591c32bd 327 void *buffer,
bbe7a10e 328 size_t *lenp, loff_t *ppos)
e162b39a
MSB
329{
330 int ret;
331
8d65af78 332 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
e162b39a
MSB
333
334 if (ret || !write)
335 goto out;
336
e162b39a
MSB
337 wake_up_process(watchdog_task);
338
339 out:
340 return ret;
341}
342
/*
 * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs
 * and hung_task_check_interval_secs
 */
static const unsigned long hung_task_timeout_max = (LONG_MAX / HZ);

/* kernel.* sysctl knobs for the hung task detector. */
static const struct ctl_table hung_task_sysctls[] = {
#ifdef CONFIG_SMP
	{
		.procname	= "hung_task_all_cpu_backtrace",
		.data		= &sysctl_hung_task_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
	{
		.procname	= "hung_task_panic",
		.data		= &sysctl_hung_task_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "hung_task_check_count",
		.data		= &sysctl_hung_task_check_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "hung_task_timeout_secs",
		.data		= &sysctl_hung_task_timeout_secs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_dohung_task_timeout_secs,
		.extra2		= (void *)&hung_task_timeout_max,
	},
	{
		.procname	= "hung_task_check_interval_secs",
		.data		= &sysctl_hung_task_check_interval_secs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_dohung_task_timeout_secs,
		.extra2		= (void *)&hung_task_timeout_max,
	},
	{
		.procname	= "hung_task_warnings",
		.data		= &sysctl_hung_task_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_NEG_ONE,
	},
	{
		/* Read-only: cumulative count of hung tasks detected since boot. */
		.procname	= "hung_task_detect_count",
		.data		= &sysctl_hung_task_detect_count,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
};

static void __init hung_task_sysctl_init(void)
{
	register_sysctl_init("kernel", hung_task_sysctls);
}
#else
#define hung_task_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
417
418
/* When set, the next due scan is skipped once (flag self-clears). */
static atomic_t reset_hung_task = ATOMIC_INIT(0);

/*
 * reset_hung_task_detector - ask khungtaskd to skip its next check,
 * e.g. after a long legitimate stall that would trigger false positives.
 */
void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
426
a1c6ca3c
VK
427static bool hung_detector_suspended;
428
429static int hungtask_pm_notify(struct notifier_block *self,
430 unsigned long action, void *hcpu)
431{
432 switch (action) {
433 case PM_SUSPEND_PREPARE:
434 case PM_HIBERNATION_PREPARE:
435 case PM_RESTORE_PREPARE:
436 hung_detector_suspended = true;
437 break;
438 case PM_POST_SUSPEND:
439 case PM_POST_HIBERNATION:
440 case PM_POST_RESTORE:
441 hung_detector_suspended = false;
442 break;
443 default:
444 break;
445 }
446 return NOTIFY_OK;
447}
448
e162b39a
MSB
449/*
450 * kthread which checks for tasks stuck in D state
451 */
452static int watchdog(void *dummy)
453{
b4aa14a6
TH
454 unsigned long hung_last_checked = jiffies;
455
e162b39a 456 set_user_nice(current, 0);
e162b39a
MSB
457
458 for ( ; ; ) {
17406b82 459 unsigned long timeout = sysctl_hung_task_timeout_secs;
a2e51445
DV
460 unsigned long interval = sysctl_hung_task_check_interval_secs;
461 long t;
603a148f 462
a2e51445
DV
463 if (interval == 0)
464 interval = timeout;
465 interval = min_t(unsigned long, interval, timeout);
466 t = hung_timeout_jiffies(hung_last_checked, interval);
b4aa14a6 467 if (t <= 0) {
a1c6ca3c
VK
468 if (!atomic_xchg(&reset_hung_task, 0) &&
469 !hung_detector_suspended)
b4aa14a6
TH
470 check_hung_uninterruptible_tasks(timeout);
471 hung_last_checked = jiffies;
8b414521 472 continue;
b4aa14a6
TH
473 }
474 schedule_timeout_interruptible(t);
e162b39a
MSB
475 }
476
477 return 0;
478}
479
/*
 * Boot-time setup: register the panic and PM notifiers before starting
 * khungtaskd, so the thread observes their state from its first scan.
 */
static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	/* Disable hung task detector on suspend */
	pm_notifier(hungtask_pm_notify, 0);

	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
	hung_task_sysctl_init();

	return 0;
}
subsys_initcall(hung_task_init);