Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/panic.c | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | */ | |
6 | ||
7 | /* | |
8 | * This function is used throughout the kernel (including mm and fs) |
9 | * to indicate a major problem. | |
10 | */ | |
c95dbf27 | 11 | #include <linux/debug_locks.h> |
b17b0153 | 12 | #include <linux/sched/debug.h> |
c95dbf27 | 13 | #include <linux/interrupt.h> |
456b565c | 14 | #include <linux/kmsg_dump.h> |
c95dbf27 IM |
15 | #include <linux/kallsyms.h> |
16 | #include <linux/notifier.h> | |
c7c3f05e | 17 | #include <linux/vt_kern.h> |
1da177e4 | 18 | #include <linux/module.h> |
c95dbf27 | 19 | #include <linux/random.h> |
de7edd31 | 20 | #include <linux/ftrace.h> |
1da177e4 | 21 | #include <linux/reboot.h> |
c95dbf27 IM |
22 | #include <linux/delay.h> |
23 | #include <linux/kexec.h> | |
24 | #include <linux/sched.h> | |
1da177e4 | 25 | #include <linux/sysrq.h> |
c95dbf27 | 26 | #include <linux/init.h> |
1da177e4 | 27 | #include <linux/nmi.h> |
08d78658 | 28 | #include <linux/console.h> |
2553b67a | 29 | #include <linux/bug.h> |
7a46ec0e | 30 | #include <linux/ratelimit.h> |
b1fca27d AK |
31 | #include <linux/debugfs.h> |
32 | #include <asm/sections.h> | |
1da177e4 | 33 | |
c7ff0d9c TS |
34 | #define PANIC_TIMER_STEP 100 |
35 | #define PANIC_BLINK_SPD 18 | |
36 | ||
2a01bb38 | 37 | int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; |
bc4f2f54 KC |
38 | static unsigned long tainted_mask = |
39 | IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0; | |
dd287796 AM |
40 | static int pause_on_oops; |
41 | static int pause_on_oops_flag; | |
42 | static DEFINE_SPINLOCK(pause_on_oops_lock); | |
5375b708 | 43 | bool crash_kexec_post_notifiers; |
9e3961a0 | 44 | int panic_on_warn __read_mostly; |
1da177e4 | 45 | |
5800dc3c | 46 | int panic_timeout = CONFIG_PANIC_TIMEOUT; |
81e88fdc | 47 | EXPORT_SYMBOL_GPL(panic_timeout); |
1da177e4 | 48 | |
d999bd93 FT |
49 | #define PANIC_PRINT_TASK_INFO 0x00000001 |
50 | #define PANIC_PRINT_MEM_INFO 0x00000002 | |
51 | #define PANIC_PRINT_TIMER_INFO 0x00000004 | |
52 | #define PANIC_PRINT_LOCK_INFO 0x00000008 | |
53 | #define PANIC_PRINT_FTRACE_INFO 0x00000010 | |
81c9d43f | 54 | unsigned long panic_print; |
d999bd93 | 55 | |
e041c683 | 56 | ATOMIC_NOTIFIER_HEAD(panic_notifier_list); |
1da177e4 LT |
57 | |
58 | EXPORT_SYMBOL(panic_notifier_list); | |
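As a brief illustration of how this chain is consumed elsewhere, a driver can hook panic via `atomic_notifier_chain_register()`. The sketch below is hypothetical (`my_panic_event` and `my_panic_nb` are made-up names, not code from this file); it relies only on the notifier API used here:

```c
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical callback: panic() passes its formatted message as @data. */
static int my_panic_event(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	pr_emerg("mydrv: panic observed: %s\n", (const char *)data);
	return NOTIFY_DONE;	/* last-gasp work only; no sleeping here */
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_event,
};

/* In the driver's init path (illustrative):
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */
```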
59 | ||
c7ff0d9c | 60 | static long no_blink(int state) |
8aeee85a | 61 | { |
c7ff0d9c | 62 | return 0; |
8aeee85a AB |
63 | } |
64 | ||
c7ff0d9c TS |
65 | /* Returns how long it waited in ms */ |
66 | long (*panic_blink)(int state); | |
67 | EXPORT_SYMBOL(panic_blink); | |
68 | ||
93e13a36 MH |
69 | /* |
70 | * Stop ourselves in panic -- architecture code may override this |
71 | */ | |
72 | void __weak panic_smp_self_stop(void) | |
73 | { | |
74 | while (1) | |
75 | cpu_relax(); | |
76 | } | |
77 | ||
58c5661f HK |
78 | /* |
79 | * Stop ourselves in NMI context if another CPU has already panicked. Arch code | |
80 | * may override this to prepare for crash dumping, e.g. save regs info. | |
81 | */ | |
82 | void __weak nmi_panic_self_stop(struct pt_regs *regs) | |
83 | { | |
84 | panic_smp_self_stop(); | |
85 | } | |
86 | ||
0ee59413 HK |
87 | /* |
88 | * Stop other CPUs in panic. Architecture dependent code may override this | |
89 | * with a more suitable version. For example, if the architecture supports |
90 | * crash dump, it should save registers of each stopped CPU and disable | |
91 | * per-CPU features such as virtualization extensions. | |
92 | */ | |
93 | void __weak crash_smp_send_stop(void) | |
94 | { | |
95 | static int cpus_stopped; | |
96 | ||
97 | /* | |
98 | * This function can be called twice in the panic path, but obviously |
99 | * we execute this only once. | |
100 | */ | |
101 | if (cpus_stopped) | |
102 | return; | |
103 | ||
104 | /* | |
105 | * Note smp_send_stop is the usual smp shutdown function, which | |
106 | * unfortunately means it may not be hardened to work in a panic | |
107 | * situation. | |
108 | */ | |
109 | smp_send_stop(); | |
110 | cpus_stopped = 1; | |
111 | } | |
112 | ||
1717f209 HK |
113 | atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); |
114 | ||
ebc41f20 HK |
115 | /* |
116 | * A variant of panic() called from NMI context. We return if we've already | |
117 | * panicked on this CPU. If another CPU already panicked, loop in | |
118 | * nmi_panic_self_stop() which can provide architecture dependent code such | |
119 | * as saving register state for crash dump. | |
120 | */ | |
121 | void nmi_panic(struct pt_regs *regs, const char *msg) | |
122 | { | |
123 | int old_cpu, cpu; | |
124 | ||
125 | cpu = raw_smp_processor_id(); | |
126 | old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); | |
127 | ||
128 | if (old_cpu == PANIC_CPU_INVALID) | |
129 | panic("%s", msg); | |
130 | else if (old_cpu != cpu) | |
131 | nmi_panic_self_stop(regs); | |
132 | } | |
133 | EXPORT_SYMBOL(nmi_panic); | |
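A minimal sketch of the intended call pattern from an architecture's NMI path; the handler and the `lockup_detected()` predicate are assumptions for illustration, not code from this file:

```c
/* Hypothetical arch NMI handler. nmi_panic() returns only if this CPU
 * already owns panic_cpu (it panicked earlier on this CPU); if another
 * CPU won the race, it spins in nmi_panic_self_stop() and never returns.
 */
static void arch_handle_nmi(struct pt_regs *regs)
{
	if (lockup_detected())			/* assumed predicate */
		nmi_panic(regs, "Hard LOCKUP");
	/* Reaching here: either no lockup, or we had already panicked. */
}
```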
134 | ||
d999bd93 FT |
135 | static void panic_print_sys_info(void) |
136 | { | |
137 | if (panic_print & PANIC_PRINT_TASK_INFO) | |
138 | show_state(); | |
139 | ||
140 | if (panic_print & PANIC_PRINT_MEM_INFO) | |
141 | show_mem(0, NULL); | |
142 | ||
143 | if (panic_print & PANIC_PRINT_TIMER_INFO) | |
144 | sysrq_timer_list_show(); | |
145 | ||
146 | if (panic_print & PANIC_PRINT_LOCK_INFO) | |
147 | debug_show_all_locks(); | |
148 | ||
149 | if (panic_print & PANIC_PRINT_FTRACE_INFO) | |
150 | ftrace_dump(DUMP_ALL); | |
151 | } | |
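For illustration, the flags above combine as an ordinary bitmask; e.g. `panic_print=0x11` on the kernel command line (wired up via `core_param()` at the bottom of this file) selects task info plus an ftrace dump. A hedged equivalent in code:

```c
/* Illustrative only: request task state and ftrace buffers at panic.
 * 0x00000001 (TASK_INFO) | 0x00000010 (FTRACE_INFO) == 0x11.
 */
panic_print = PANIC_PRINT_TASK_INFO | PANIC_PRINT_FTRACE_INFO;
```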
152 | ||
1da177e4 LT |
153 | /** |
154 | * panic - halt the system | |
155 | * @fmt: The text string to print | |
156 | * | |
157 | * Display a message, then perform cleanups. | |
158 | * | |
159 | * This function never returns. | |
160 | */ | |
9402c95f | 161 | void panic(const char *fmt, ...) |
1da177e4 | 162 | { |
1da177e4 LT |
163 | static char buf[1024]; |
164 | va_list args; | |
b49dec1c | 165 | long i, i_next = 0, len; |
c7ff0d9c | 166 | int state = 0; |
1717f209 | 167 | int old_cpu, this_cpu; |
b26e27dd | 168 | bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers; |
1da177e4 | 169 | |
190320c3 VM |
170 | /* |
171 | * Disable local interrupts. This will prevent panic_smp_self_stop | |
172 | * from deadlocking the first cpu that invokes the panic, since | |
173 | * there is nothing to prevent an interrupt handler (that runs | |
1717f209 | 174 | * after setting panic_cpu) from invoking panic() again. |
190320c3 VM |
175 | */ |
176 | local_irq_disable(); | |
177 | ||
dc009d92 | 178 | /* |
c95dbf27 IM |
179 | * It's possible to come here directly from a panic-assertion and |
180 | * not have preempt disabled. Some functions called from here want | |
dc009d92 | 181 | * preempt to be disabled. No point enabling it later though... |
93e13a36 MH |
182 | * |
183 | * Only one CPU is allowed to execute the panic code from here. For | |
184 | * multiple parallel invocations of panic, all other CPUs either | |
185 | * stop themselves or wait until they are stopped by the 1st CPU |
186 | * with smp_send_stop(). | |
1717f209 HK |
187 | * |
188 | * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which | |
189 | * comes here, so go ahead. | |
190 | * `old_cpu == this_cpu' means we came from nmi_panic() which sets | |
191 | * panic_cpu to this CPU. In this case, this is also the 1st CPU. | |
dc009d92 | 192 | */ |
1717f209 HK |
193 | this_cpu = raw_smp_processor_id(); |
194 | old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu); | |
195 | ||
196 | if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu) | |
93e13a36 | 197 | panic_smp_self_stop(); |
dc009d92 | 198 | |
5b530fc1 | 199 | console_verbose(); |
1da177e4 LT |
200 | bust_spinlocks(1); |
201 | va_start(args, fmt); | |
b49dec1c | 202 | len = vscnprintf(buf, sizeof(buf), fmt, args); |
1da177e4 | 203 | va_end(args); |
b49dec1c BP |
204 | |
205 | if (len && buf[len - 1] == '\n') | |
206 | buf[len - 1] = '\0'; | |
207 | ||
d7c0847f | 208 | pr_emerg("Kernel panic - not syncing: %s\n", buf); |
5cb27301 | 209 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
6e6f0a1f AK |
210 | /* |
211 | * Avoid nested stack-dumping if a panic occurs during oops processing | |
212 | */ | |
026ee1f6 | 213 | if (!test_taint(TAINT_DIE) && oops_in_progress <= 1) |
6e6f0a1f | 214 | dump_stack(); |
5cb27301 | 215 | #endif |
1da177e4 | 216 | |
dc009d92 EB |
217 | /* |
218 | * If we have crashed and we have a crash kernel loaded let it handle | |
219 | * everything else. | |
f06e5153 MH |
220 | * If we want to run this after calling panic_notifiers, pass |
221 | * the "crash_kexec_post_notifiers" option to the kernel. | |
7bbee5ca HK |
222 | * |
223 | * Bypass the panic_cpu check and call __crash_kexec directly. | |
dc009d92 | 224 | */ |
b26e27dd | 225 | if (!_crash_kexec_post_notifiers) { |
f92bac3b | 226 | printk_safe_flush_on_panic(); |
7bbee5ca | 227 | __crash_kexec(NULL); |
dc009d92 | 228 | |
0ee59413 HK |
229 | /* |
230 | * Note smp_send_stop is the usual smp shutdown function, which | |
231 | * unfortunately means it may not be hardened to work in a | |
232 | * panic situation. | |
233 | */ | |
234 | smp_send_stop(); | |
235 | } else { | |
236 | /* | |
237 | * If we want to do crash dump after notifier calls and | |
238 | * kmsg_dump, we will need architecture dependent extra | |
239 | * work in addition to stopping other CPUs. |
240 | */ | |
241 | crash_smp_send_stop(); | |
242 | } | |
1da177e4 | 243 | |
6723734c KC |
244 | /* |
245 | * Run any panic handlers, including those that might need to | |
246 | * add information to the kmsg dump output. | |
247 | */ | |
e041c683 | 248 | atomic_notifier_call_chain(&panic_notifier_list, 0, buf); |
1da177e4 | 249 | |
cf9b1106 | 250 | /* Call flush even twice. It tries harder with a single online CPU */ |
f92bac3b | 251 | printk_safe_flush_on_panic(); |
6723734c KC |
252 | kmsg_dump(KMSG_DUMP_PANIC); |
253 | ||
f06e5153 MH |
254 | /* |
255 | * If you doubt that kdump always works correctly in any situation, |
256 | * "crash_kexec_post_notifiers" offers you a chance to run |
257 | * panic_notifiers and dump kmsg before kdump. |
258 | * Note: since some panic_notifiers can make the crashed kernel |
259 | * more unstable, this can also increase the risk of kdump failure. |
7bbee5ca HK |
260 | * |
261 | * Bypass the panic_cpu check and call __crash_kexec directly. | |
f06e5153 | 262 | */ |
b26e27dd | 263 | if (_crash_kexec_post_notifiers) |
7bbee5ca | 264 | __crash_kexec(NULL); |
f06e5153 | 265 | |
c7c3f05e SS |
266 | #ifdef CONFIG_VT |
267 | unblank_screen(); | |
268 | #endif | |
269 | console_unblank(); | |
d014e889 | 270 | |
08d78658 VK |
271 | /* |
272 | * We may have ended up stopping the CPU holding the lock (in | |
273 | * smp_send_stop()) while still having some valuable data in the console | |
274 | * buffer. Try to acquire the lock then release it regardless of the | |
7625b3a0 VK |
275 | result. The release will also print the buffers out. Lock debugging |
276 | should be disabled to avoid reporting a bad unlock balance when |
277 | panic() is not being called from an oops. |
08d78658 | 278 | */ |
7625b3a0 | 279 | debug_locks_off(); |
8d91f8b1 | 280 | console_flush_on_panic(); |
08d78658 | 281 | |
d999bd93 FT |
282 | panic_print_sys_info(); |
283 | ||
c7ff0d9c TS |
284 | if (!panic_blink) |
285 | panic_blink = no_blink; | |
286 | ||
dc009d92 | 287 | if (panic_timeout > 0) { |
1da177e4 | 288 | /* |
c95dbf27 IM |
289 | * Delay timeout seconds before rebooting the machine. |
290 | * We can't use the "normal" timers since we just panicked. | |
291 | */ | |
ff7a28a0 | 292 | pr_emerg("Rebooting in %d seconds..\n", panic_timeout); |
c95dbf27 | 293 | |
c7ff0d9c | 294 | for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { |
1da177e4 | 295 | touch_nmi_watchdog(); |
c7ff0d9c TS |
296 | if (i >= i_next) { |
297 | i += panic_blink(state ^= 1); | |
298 | i_next = i + 3600 / PANIC_BLINK_SPD; | |
299 | } | |
300 | mdelay(PANIC_TIMER_STEP); | |
1da177e4 | 301 | } |
4302fbc8 HD |
302 | } |
303 | if (panic_timeout != 0) { | |
c95dbf27 IM |
304 | /* |
305 | * This will not be a clean reboot, with everything | |
306 | * shutting down. But if there is a chance of | |
307 | * rebooting the system, it will be rebooted. |
1da177e4 | 308 | */ |
2f048ea8 | 309 | emergency_restart(); |
1da177e4 LT |
310 | } |
311 | #ifdef __sparc__ | |
312 | { | |
313 | extern int stop_a_enabled; | |
a271c241 | 314 | /* Make sure the user can actually press Stop-A (L1-A) */ |
1da177e4 | 315 | stop_a_enabled = 1; |
7db60d05 VK |
316 | pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n" |
317 | "twice on console to return to the boot prom\n"); | |
1da177e4 LT |
318 | } |
319 | #endif | |
347a8dc3 | 320 | #if defined(CONFIG_S390) |
c95dbf27 IM |
321 | { |
322 | unsigned long caller; | |
323 | ||
324 | caller = (unsigned long)__builtin_return_address(0); | |
325 | disabled_wait(caller); | |
326 | } | |
1da177e4 | 327 | #endif |
5ad75105 | 328 | pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf); |
1da177e4 | 329 | local_irq_enable(); |
c7ff0d9c | 330 | for (i = 0; ; i += PANIC_TIMER_STEP) { |
c22db941 | 331 | touch_softlockup_watchdog(); |
c7ff0d9c TS |
332 | if (i >= i_next) { |
333 | i += panic_blink(state ^= 1); | |
334 | i_next = i + 3600 / PANIC_BLINK_SPD; | |
335 | } | |
336 | mdelay(PANIC_TIMER_STEP); | |
1da177e4 LT |
337 | } |
338 | } | |
339 | ||
340 | EXPORT_SYMBOL(panic); | |
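An illustrative call site, to make the contract concrete: `panic()` takes a printf-style format and never returns (the condition, message, and variable names here are made up):

```c
/* Hypothetical call site: printf-style message, control never returns. */
if (unrecoverable_error)
	panic("mydrv: fatal error %d, giving up", err);
```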
341 | ||
7fd8329b PM |
342 | /* |
343 | * TAINT_FORCED_RMMOD could be a per-module flag but the module | |
344 | * is being removed anyway. | |
345 | */ | |
346 | const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = { | |
47d4b263 KC |
347 | [ TAINT_PROPRIETARY_MODULE ] = { 'P', 'G', true }, |
348 | [ TAINT_FORCED_MODULE ] = { 'F', ' ', true }, | |
349 | [ TAINT_CPU_OUT_OF_SPEC ] = { 'S', ' ', false }, | |
350 | [ TAINT_FORCED_RMMOD ] = { 'R', ' ', false }, | |
351 | [ TAINT_MACHINE_CHECK ] = { 'M', ' ', false }, | |
352 | [ TAINT_BAD_PAGE ] = { 'B', ' ', false }, | |
353 | [ TAINT_USER ] = { 'U', ' ', false }, | |
354 | [ TAINT_DIE ] = { 'D', ' ', false }, | |
355 | [ TAINT_OVERRIDDEN_ACPI_TABLE ] = { 'A', ' ', false }, | |
356 | [ TAINT_WARN ] = { 'W', ' ', false }, | |
357 | [ TAINT_CRAP ] = { 'C', ' ', true }, | |
358 | [ TAINT_FIRMWARE_WORKAROUND ] = { 'I', ' ', false }, | |
359 | [ TAINT_OOT_MODULE ] = { 'O', ' ', true }, | |
360 | [ TAINT_UNSIGNED_MODULE ] = { 'E', ' ', true }, | |
361 | [ TAINT_SOFTLOCKUP ] = { 'L', ' ', false }, | |
362 | [ TAINT_LIVEPATCH ] = { 'K', ' ', true }, | |
363 | [ TAINT_AUX ] = { 'X', ' ', true }, | |
bc4f2f54 | 364 | [ TAINT_RANDSTRUCT ] = { 'T', ' ', true }, |
25ddbb18 AK |
365 | }; |
366 | ||
1da177e4 | 367 | /** |
9c4560e5 | 368 | * print_tainted - return a string to represent the kernel taint state. |
1da177e4 | 369 | * |
9c4560e5 | 370 | * For individual taint flag meanings, see Documentation/sysctl/kernel.txt |
1da177e4 | 371 | * |
9c4560e5 KC |
372 | * The string is overwritten by the next call to print_tainted(), |
373 | * but is always NULL terminated. | |
1da177e4 | 374 | */ |
1da177e4 LT |
375 | const char *print_tainted(void) |
376 | { | |
7fd8329b | 377 | static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")]; |
25ddbb18 | 378 | |
47d4b263 KC |
379 | BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT); |
380 | ||
25ddbb18 AK |
381 | if (tainted_mask) { |
382 | char *s; | |
383 | int i; | |
384 | ||
385 | s = buf + sprintf(buf, "Tainted: "); | |
7fd8329b PM |
386 | for (i = 0; i < TAINT_FLAGS_COUNT; i++) { |
387 | const struct taint_flag *t = &taint_flags[i]; | |
388 | *s++ = test_bit(i, &tainted_mask) ? | |
5eb7c0d0 | 389 | t->c_true : t->c_false; |
25ddbb18 AK |
390 | } |
391 | *s = 0; | |
392 | } else | |
1da177e4 | 393 | snprintf(buf, sizeof(buf), "Not tainted"); |
c95dbf27 IM |
394 | |
395 | return buf; | |
1da177e4 LT |
396 | } |
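A usage sketch: each of the `TAINT_FLAGS_COUNT` slots prints `c_true` when its bit is set and `c_false` (usually a space; 'G' for slot 0) otherwise, so with only TAINT_DIE and TAINT_WARN set the buffer reads roughly `"Tainted: G      D W"`. The consumer below is illustrative, not a call from this file:

```c
/* Illustrative consumer (dmesg-style line at oops time): */
pr_warn("CPU: %d taint: %s\n", raw_smp_processor_id(), print_tainted());
```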
397 | ||
25ddbb18 | 398 | int test_taint(unsigned flag) |
1da177e4 | 399 | { |
25ddbb18 AK |
400 | return test_bit(flag, &tainted_mask); |
401 | } | |
402 | EXPORT_SYMBOL(test_taint); | |
403 | ||
404 | unsigned long get_taint(void) | |
405 | { | |
406 | return tainted_mask; | |
1da177e4 | 407 | } |
dd287796 | 408 | |
373d4d09 RR |
409 | /** |
410 | * add_taint: add a taint flag if not already set. | |
411 | * @flag: one of the TAINT_* constants. | |
412 | * @lockdep_ok: whether lock debugging is still OK. | |
413 | * | |
414 | * If something bad has gone wrong, you'll want @lockdep_ok = false, but for |
415 | * some noteworthy-but-not-corrupting cases, it can be set to true. |
416 | */ | |
417 | void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) | |
dd287796 | 418 | { |
373d4d09 | 419 | if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) |
d7c0847f | 420 | pr_warn("Disabling lock debugging due to kernel taint\n"); |
9eeba613 | 421 | |
25ddbb18 | 422 | set_bit(flag, &tainted_mask); |
dd287796 | 423 | } |
1da177e4 | 424 | EXPORT_SYMBOL(add_taint); |
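Two illustrative calls showing the @lockdep_ok distinction (the call sites are hypothetical; the flags and enum values are the real ones used above):

```c
/* Noteworthy but not corrupting: keep lockdep trustworthy. */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);

/* Something actually went wrong: lock state can no longer be trusted,
 * so this also prints "Disabling lock debugging ..." once. */
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
```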
dd287796 AM |
425 | |
426 | static void spin_msec(int msecs) | |
427 | { | |
428 | int i; | |
429 | ||
430 | for (i = 0; i < msecs; i++) { | |
431 | touch_nmi_watchdog(); | |
432 | mdelay(1); | |
433 | } | |
434 | } | |
435 | ||
436 | /* | |
437 | * It just happens that oops_enter() and oops_exit() are identically | |
438 | * implemented... | |
439 | */ | |
440 | static void do_oops_enter_exit(void) | |
441 | { | |
442 | unsigned long flags; | |
443 | static int spin_counter; | |
444 | ||
445 | if (!pause_on_oops) | |
446 | return; | |
447 | ||
448 | spin_lock_irqsave(&pause_on_oops_lock, flags); | |
449 | if (pause_on_oops_flag == 0) { | |
450 | /* This CPU may now print the oops message */ | |
451 | pause_on_oops_flag = 1; | |
452 | } else { | |
453 | /* We need to stall this CPU */ | |
454 | if (!spin_counter) { | |
455 | /* This CPU gets to do the counting */ | |
456 | spin_counter = pause_on_oops; | |
457 | do { | |
458 | spin_unlock(&pause_on_oops_lock); | |
459 | spin_msec(MSEC_PER_SEC); | |
460 | spin_lock(&pause_on_oops_lock); | |
461 | } while (--spin_counter); | |
462 | pause_on_oops_flag = 0; | |
463 | } else { | |
464 | /* This CPU waits for a different one */ | |
465 | while (spin_counter) { | |
466 | spin_unlock(&pause_on_oops_lock); | |
467 | spin_msec(1); | |
468 | spin_lock(&pause_on_oops_lock); | |
469 | } | |
470 | } | |
471 | } | |
472 | spin_unlock_irqrestore(&pause_on_oops_lock, flags); | |
473 | } | |
474 | ||
475 | /* | |
c95dbf27 IM |
476 | * Return true if the calling CPU is allowed to print oops-related info. |
477 | * This is a bit racy. |
dd287796 AM |
478 | */ |
479 | int oops_may_print(void) | |
480 | { | |
481 | return pause_on_oops_flag == 0; | |
482 | } | |
483 | ||
484 | /* | |
485 | * Called when the architecture enters its oops handler, before it prints | |
c95dbf27 IM |
486 | * anything. If this is the first CPU to oops, and it's oopsing the first |
487 | * time, then let it proceed. |
dd287796 | 488 | * |
c95dbf27 IM |
489 | * This is all enabled by the pause_on_oops kernel boot option. We do all |
490 | * this to ensure that oopses don't scroll off the screen. It has the | |
491 | * side-effect of preventing later-oopsing CPUs from mucking up the display, | |
492 | * too. | |
dd287796 | 493 | * |
c95dbf27 IM |
494 | * It turns out that the CPU which is allowed to print ends up pausing for |
495 | * the right duration, whereas all the other CPUs pause for twice as long: | |
496 | * once in oops_enter(), once in oops_exit(). | |
dd287796 AM |
497 | */ |
498 | void oops_enter(void) | |
499 | { | |
bdff7870 | 500 | tracing_off(); |
c95dbf27 IM |
501 | /* can't trust the integrity of the kernel anymore: */ |
502 | debug_locks_off(); | |
dd287796 AM |
503 | do_oops_enter_exit(); |
504 | } | |
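As a concrete example of the scheme described above: booting with `pause_on_oops=30` (exposed via `core_param()` near the end of this file) lets the first oopsing CPU print its oops and then pause for 30 seconds in `do_oops_enter_exit()`, while later-oopsing CPUs stall quietly instead of scribbling over the display.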
505 | ||
2c3b20e9 AV |
506 | /* |
507 | * 64-bit random ID for oopses: | |
508 | */ | |
509 | static u64 oops_id; | |
510 | ||
511 | static int init_oops_id(void) | |
512 | { | |
513 | if (!oops_id) | |
514 | get_random_bytes(&oops_id, sizeof(oops_id)); | |
d6624f99 AV |
515 | else |
516 | oops_id++; | |
2c3b20e9 AV |
517 | |
518 | return 0; | |
519 | } | |
520 | late_initcall(init_oops_id); | |
521 | ||
863a6049 | 522 | void print_oops_end_marker(void) |
71c33911 AV |
523 | { |
524 | init_oops_id(); | |
d7c0847f | 525 | pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); |
71c33911 AV |
526 | } |
527 | ||
dd287796 AM |
528 | /* |
529 | * Called when the architecture exits its oops handler, after printing | |
530 | * everything. | |
531 | */ | |
532 | void oops_exit(void) | |
533 | { | |
534 | do_oops_enter_exit(); | |
71c33911 | 535 | print_oops_end_marker(); |
456b565c | 536 | kmsg_dump(KMSG_DUMP_OOPS); |
dd287796 | 537 | } |
3162f751 | 538 | |
2553b67a | 539 | struct warn_args { |
0f6f49a8 | 540 | const char *fmt; |
a8f18b90 | 541 | va_list args; |
0f6f49a8 | 542 | }; |
bd89bb29 | 543 | |
2553b67a JP |
544 | void __warn(const char *file, int line, void *caller, unsigned taint, |
545 | struct pt_regs *regs, struct warn_args *args) | |
0f6f49a8 | 546 | { |
de7edd31 SRRH |
547 | disable_trace_on_warning(); |
548 | ||
a7bed27a KC |
549 | if (args) |
550 | pr_warn(CUT_HERE); | |
2553b67a JP |
551 | |
552 | if (file) | |
553 | pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", | |
554 | raw_smp_processor_id(), current->pid, file, line, | |
555 | caller); | |
556 | else | |
557 | pr_warn("WARNING: CPU: %d PID: %d at %pS\n", | |
558 | raw_smp_processor_id(), current->pid, caller); | |
74853dba | 559 | |
0f6f49a8 LT |
560 | if (args) |
561 | vprintk(args->fmt, args->args); | |
a8f18b90 | 562 | |
9e3961a0 PB |
563 | if (panic_on_warn) { |
564 | /* | |
565 | * This thread may hit another WARN() in the panic path. | |
566 | * Resetting this prevents additional WARN() from panicking the | |
567 | * system on this thread. Other threads are blocked by the | |
568 | * panic_cpu check in panic(). |
569 | */ | |
570 | panic_on_warn = 0; | |
571 | panic("panic_on_warn set ...\n"); | |
572 | } | |
573 | ||
a8f18b90 | 574 | print_modules(); |
2553b67a JP |
575 | |
576 | if (regs) | |
577 | show_regs(regs); | |
578 | else | |
579 | dump_stack(); | |
580 | ||
4c281074 SRV |
581 | print_irqtrace_events(current); |
582 | ||
a8f18b90 | 583 | print_oops_end_marker(); |
2553b67a | 584 | |
373d4d09 RR |
585 | /* Just a warning, don't kill lockdep. */ |
586 | add_taint(taint, LOCKDEP_STILL_OK); | |
a8f18b90 | 587 | } |
0f6f49a8 | 588 | |
2553b67a | 589 | #ifdef WANT_WARN_ON_SLOWPATH |
0f6f49a8 LT |
590 | void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...) |
591 | { | |
2553b67a | 592 | struct warn_args args; |
0f6f49a8 LT |
593 | |
594 | args.fmt = fmt; | |
595 | va_start(args.args, fmt); | |
2553b67a JP |
596 | __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, |
597 | &args); | |
0f6f49a8 LT |
598 | va_end(args.args); |
599 | } | |
57adc4d2 AK |
600 | EXPORT_SYMBOL(warn_slowpath_fmt); |
601 | ||
b2be0527 BH |
602 | void warn_slowpath_fmt_taint(const char *file, int line, |
603 | unsigned taint, const char *fmt, ...) | |
604 | { | |
2553b67a | 605 | struct warn_args args; |
b2be0527 BH |
606 | |
607 | args.fmt = fmt; | |
608 | va_start(args.args, fmt); | |
2553b67a | 609 | __warn(file, line, __builtin_return_address(0), taint, NULL, &args); |
b2be0527 BH |
610 | va_end(args.args); |
611 | } | |
612 | EXPORT_SYMBOL(warn_slowpath_fmt_taint); | |
613 | ||
57adc4d2 AK |
614 | void warn_slowpath_null(const char *file, int line) |
615 | { | |
a7bed27a | 616 | pr_warn(CUT_HERE); |
2553b67a | 617 | __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL); |
57adc4d2 AK |
618 | } |
619 | EXPORT_SYMBOL(warn_slowpath_null); | |
a7bed27a KC |
620 | #else |
621 | void __warn_printk(const char *fmt, ...) | |
622 | { | |
623 | va_list args; | |
624 | ||
625 | pr_warn(CUT_HERE); | |
626 | ||
627 | va_start(args, fmt); | |
628 | vprintk(fmt, args); | |
629 | va_end(args); | |
630 | } | |
631 | EXPORT_SYMBOL(__warn_printk); | |
79b4cc5e AV |
632 | #endif |
633 | ||
b1fca27d AK |
634 | #ifdef CONFIG_BUG |
635 | ||
636 | /* Support resetting WARN*_ONCE state */ | |
637 | ||
638 | static int clear_warn_once_set(void *data, u64 val) | |
639 | { | |
aaf5dcfb | 640 | generic_bug_clear_once(); |
b1fca27d AK |
641 | memset(__start_once, 0, __end_once - __start_once); |
642 | return 0; | |
643 | } | |
644 | ||
4169680e Y |
645 | DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set, |
646 | "%lld\n"); | |
b1fca27d AK |
647 | |
648 | static __init int register_warn_debugfs(void) | |
649 | { | |
650 | /* Don't care about failure */ | |
4169680e Y |
651 | debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL, |
652 | &clear_warn_once_fops); | |
b1fca27d AK |
653 | return 0; |
654 | } | |
655 | ||
656 | device_initcall(register_warn_debugfs); | |
657 | #endif | |
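Usage note (assuming debugfs is mounted at the conventional path): writing any value, e.g. `echo 1 > /sys/kernel/debug/clear_warn_once`, re-arms every `WARN_ONCE()`-style one-shot site by clearing the once section and the per-arch bug-flag state via `generic_bug_clear_once()`.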
658 | ||
050e9baa | 659 | #ifdef CONFIG_STACKPROTECTOR |
54371a43 | 660 | |
3162f751 AV |
661 | /* |
662 | * Called when gcc's -fstack-protector feature is used, and | |
663 | * gcc detects corruption of the on-stack canary value | |
664 | */ | |
a7330c99 | 665 | __visible void __stack_chk_fail(void) |
3162f751 | 666 | { |
95c4fb78 | 667 | panic("stack-protector: Kernel stack is corrupted in: %pB", |
517a92c4 | 668 | __builtin_return_address(0)); |
3162f751 AV |
669 | } |
670 | EXPORT_SYMBOL(__stack_chk_fail); | |
54371a43 | 671 | |
3162f751 | 672 | #endif |
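For illustration, the kind of bug the canary catches (a deliberately broken sketch; `copy_name()` is hypothetical): with `-fstack-protector`, gcc places a canary between the locals and the saved return address and checks it in the epilogue; if the copy overruns `buf`, the check fails and control reaches `__stack_chk_fail()` above, which panics and names the corrupted frame via `%pB`:

```c
#include <linux/string.h>

/* DO NOT copy this pattern: it exists only to show what trips the canary. */
static void copy_name(const char *src)
{
	char buf[8];

	strcpy(buf, src);	/* unbounded copy can smash the canary */
}				/* epilogue check -> __stack_chk_fail() */
```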
f44dd164 | 673 | |
7a46ec0e KC |
674 | #ifdef CONFIG_ARCH_HAS_REFCOUNT |
675 | void refcount_error_report(struct pt_regs *regs, const char *err) | |
676 | { | |
677 | WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n", | |
678 | err, (void *)instruction_pointer(regs), | |
679 | current->comm, task_pid_nr(current), | |
680 | from_kuid_munged(&init_user_ns, current_uid()), | |
681 | from_kuid_munged(&init_user_ns, current_euid())); | |
682 | } | |
683 | #endif | |
684 | ||
f44dd164 | 685 | core_param(panic, panic_timeout, int, 0644); |
d999bd93 | 686 | core_param(panic_print, panic_print, ulong, 0644); |
f44dd164 | 687 | core_param(pause_on_oops, pause_on_oops, int, 0644); |
9e3961a0 | 688 | core_param(panic_on_warn, panic_on_warn, int, 0644); |
b26e27dd | 689 | core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644); |
f06e5153 | 690 | |
d404ab0a OH |
691 | static int __init oops_setup(char *s) |
692 | { | |
693 | if (!s) | |
694 | return -EINVAL; | |
695 | if (!strcmp(s, "panic")) | |
696 | panic_on_oops = 1; | |
697 | return 0; | |
698 | } | |
699 | early_param("oops", oops_setup); |
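Putting the knobs in this file together, an illustrative boot line (values are examples): `oops=panic panic=10 panic_print=0x11 pause_on_oops=30 crash_kexec_post_notifiers` — an oops immediately escalates to panic, the machine reboots 10 seconds after panicking, task and ftrace info are dumped first, and kdump runs after the panic notifiers and kmsg dump instead of before them.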