// SPDX-License-Identifier: GPL-2.0-only
/*
 * crash.c - kernel crash support code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/buildid.h>
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/kexec.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/crash_core.h>
#include <linux/reboot.h>
#include <linux/btf.h>
#include <linux/objtool.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/sha1.h>

#include "kallsyms_internal.h"
#include "kexec_internal.h"

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

#ifdef CONFIG_CRASH_DUMP

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
		return 0;
	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory. As we have arch_kexec_protect_crashkres()
	 * after the kexec syscall, it is naturally protected from
	 * write (even read) access under the kernel direct mapping.
	 * But on the other hand, we still need to write to it when
	 * a crash happens to generate the vmcoreinfo note, hence we
	 * rely on vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}
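
/*
 * Decide, at oops/fatal-exception time (see the panic() calls in the
 * make_task_dead() path), whether the crash kernel should be entered
 * right away.
 */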
int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the make_task_dead() path, one for
	 * each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (kexec_trylock()) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		kexec_unlock();
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);
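
/*
 * Serialized entry point used by panic(); also exposed to BPF programs
 * as a kfunc (that is what the __bpf_kfunc annotation provides).
 */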
__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic(). Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other. To exclude them, we use panic_cpu here too.
	 */
	old_cpu = PANIC_CPU_INVALID;
	this_cpu = raw_smp_processor_id();

	if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}
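
/*
 * A crashk resource that was never reserved (or has been fully
 * released) has both ->start and ->end set to 0, so a zero ->end is
 * used as the "nothing reserved" marker here.
 */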
static inline resource_size_t crash_resource_size(const struct resource *res)
{
	return !res->end ? 0 : resource_size(res);
}
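
/*
 * The ELF core header built below consists of: one Elf64_Ehdr, one
 * PT_NOTE phdr per possible CPU (the per-cpu crash_notes), one PT_NOTE
 * phdr for vmcoreinfo, an optional PT_LOAD phdr covering the kernel
 * text mapping, and one PT_LOAD phdr per range in @mem.
 */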
int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000
	 * on x86_64). I think this is required by tools like gdb. So the
	 * same physical memory will be mapped in two ELF headers. One will
	 * contain kernel text virtual addresses and the other will have
	 * __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each possible CPU */
	for_each_possible_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (need_kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (unsigned long) _text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		(ehdr->e_phnum)++;
#ifdef CONFIG_KEXEC_FILE
		kexec_dprintk("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			      phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			      ehdr->e_phnum, phdr->p_offset);
#endif
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
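
/*
 * Example: excluding [16M, 48M - 1] from the single range [0, 128M - 1]
 * leaves [0, 16M - 1] and [48M, 128M - 1]. That middle "split" case
 * inserts one extra entry, which is why callers must leave slack in
 * mem->max_nr_ranges.
 */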
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i;
	unsigned long long start, end, p_start, p_end;

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;
		p_start = mstart;
		p_end = mend;

		if (p_start > end)
			continue;

		/*
		 * Because the memory ranges in mem->ranges are stored in
		 * ascending order, when we detect `p_end < start`, we can
		 * immediately exit the for loop, as the subsequent memory
		 * ranges will definitely be outside the range we are looking
		 * for.
		 */
		if (p_end < start)
			break;

		/* Truncate any area outside of range */
		if (p_start < start)
			p_start = start;
		if (p_end > end)
			p_end = end;

		/* Found completely overlapping range */
		if (p_start == start && p_end == end) {
			memmove(&mem->ranges[i], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
			i--;
			mem->nr_ranges--;
		} else if (p_start > start && p_end < end) {
			/* Split original range */
			if (mem->nr_ranges >= mem->max_nr_ranges)
				return -ENOMEM;

			memmove(&mem->ranges[i + 2], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));

			mem->ranges[i].end = p_start - 1;
			mem->ranges[i + 1].start = p_end + 1;
			mem->ranges[i + 1].end = end;

			i++;
			mem->nr_ranges++;
		} else if (p_start != start)
			mem->ranges[i].end = p_start - 1;
		else
			mem->ranges[i].start = p_end + 1;
	}

	return 0;
}
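
/* Backs reads of /sys/kernel/kexec_crash_size. */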
ssize_t crash_get_memory_size(void)
{
	ssize_t size = 0;

	if (!kexec_trylock())
		return -EBUSY;

	size += crash_resource_size(&crashk_res);
	size += crash_resource_size(&crashk_low_res);

	kexec_unlock();
	return size;
}
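
/*
 * Carve the tail [old_res->start + new_size, old_res->end] off a crash
 * reservation and hand it back to the system as regular "System RAM".
 */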
static int __crash_shrink_memory(struct resource *old_res,
				 unsigned long new_size)
{
	struct resource *ram_res;

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res)
		return -ENOMEM;

	ram_res->start = old_res->start + new_size;
	ram_res->end = old_res->end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	if (!new_size) {
		release_resource(old_res);
		old_res->start = 0;
		old_res->end = 0;
	} else {
		crashk_res.end = ram_res->start - 1;
	}

	crash_free_reserved_phys_range(ram_res->start, ram_res->end);
	insert_resource(&iomem_resource, ram_res);

	return 0;
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long old_size, low_size;

	if (!kexec_trylock())
		return -EBUSY;

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}

	low_size = crash_resource_size(&crashk_low_res);
	old_size = crash_resource_size(&crashk_res) + low_size;
	new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	/*
	 * (low_size > new_size) implies that low_size is greater than zero.
	 * This also means that if low_size is zero, the else branch is taken.
	 *
	 * If low_size is greater than 0, (low_size > new_size) indicates that
	 * crashk_low_res also needs to be shrunk. Otherwise, only crashk_res
	 * needs to be shrunk.
	 */
	if (low_size > new_size) {
		ret = __crash_shrink_memory(&crashk_res, 0);
		if (ret)
			goto unlock;

		ret = __crash_shrink_memory(&crashk_low_res, new_size);
	} else {
		ret = __crash_shrink_memory(&crashk_res, new_size - low_size);
	}

	/* Swap crashk_res and crashk_low_res if needed */
	if (!crashk_res.end && crashk_low_res.end) {
		crashk_res.start = crashk_low_res.start;
		crashk_res.end = crashk_low_res.end;
		release_resource(&crashk_low_res);
		crashk_low_res.start = 0;
		crashk_low_res.end = 0;
		insert_resource(&iomem_resource, &crashk_res);
	}

unlock:
	kexec_unlock();
	return ret;
}
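
/*
 * Record one CPU's register state as an ELF NT_PRSTATUS note in its
 * per-cpu crash_notes buffer; typically invoked from the architecture's
 * crash shutdown path for each CPU.
 */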
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages. In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs. Here round up the size of crash_notes to
	 * the nearest power of two and pass it to __alloc_percpu as the
	 * align value. This makes sure crash_notes is allocated inside one
	 * physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break compile if size is bigger than PAGE_SIZE since crash_notes
	 * definitely will be in 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CRASH_HOTPLUG
#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

/*
 * Unlike kexec/kdump loading/unloading/jumping/shrinking, which happen
 * rarely, many crash hotplug events can be notified during one short
 * period, e.g. when one memory board is hot-added and its memory regions
 * come online. So the mutex __crash_hotplug_lock is used to serialize
 * the crash hotplug handling specifically.
 */
static DEFINE_MUTEX(__crash_hotplug_lock);
#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)

/*
 * This routine is used when the crash_hotplug sysfs node is read.
 * It reflects the kernel's ability/permission to update the crash
 * elfcorehdr directly.
 */
int crash_check_update_elfcorehdr(void)
{
	int rc = 0;

	crash_hotplug_lock();
	/* Obtain lock while reading crash information */
	if (!kexec_trylock()) {
		pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
		crash_hotplug_unlock();
		return 0;
	}
	if (kexec_crash_image) {
		if (kexec_crash_image->file_mode)
			rc = 1;
		else
			rc = kexec_crash_image->update_elfcorehdr;
	}
	/* Release locks now that the read is complete */
	kexec_unlock();
	crash_hotplug_unlock();

	return rc;
}

/*
 * To accurately reflect hot un/plug changes of cpu and memory resources
 * (including onlining and offlining of those resources), the elfcorehdr
 * (which is passed to the crash kernel via the elfcorehdr= parameter)
 * must be updated with the new list of CPUs and memories.
 *
 * In order to make changes to elfcorehdr, two conditions are needed:
 * First, the segment containing the elfcorehdr must be large enough
 * to permit a growing number of resources; the elfcorehdr memory size
 * is based on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES.
 * Second, purgatory must explicitly exclude the elfcorehdr from the
 * list of segments it checks (since the elfcorehdr changes and thus
 * would require an update to purgatory itself to update the digest).
 */
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu)
{
	struct kimage *image;

	crash_hotplug_lock();
	/* Obtain lock while changing crash information */
	if (!kexec_trylock()) {
		pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
		crash_hotplug_unlock();
		return;
	}

	/* Nothing to do if kdump is not loaded */
	if (!kexec_crash_image)
		goto out;

	image = kexec_crash_image;

	/* Check that updating elfcorehdr is permitted */
	if (!(image->file_mode || image->update_elfcorehdr))
		goto out;

	if (hp_action == KEXEC_CRASH_HP_ADD_CPU ||
	    hp_action == KEXEC_CRASH_HP_REMOVE_CPU)
		pr_debug("hp_action %u, cpu %u\n", hp_action, cpu);
	else
		pr_debug("hp_action %u\n", hp_action);

	/*
	 * The elfcorehdr_index is set to -1 when the struct kimage
	 * is allocated. Find the segment containing the elfcorehdr,
	 * if not already found.
	 */
	if (image->elfcorehdr_index < 0) {
		unsigned long mem;
		unsigned char *ptr;
		unsigned int n;

		for (n = 0; n < image->nr_segments; n++) {
			mem = image->segment[n].mem;
			ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
			if (ptr) {
				/* The segment containing elfcorehdr */
				if (memcmp(ptr, ELFMAG, SELFMAG) == 0)
					image->elfcorehdr_index = (int)n;
				kunmap_local(ptr);
			}
		}
	}

	if (image->elfcorehdr_index < 0) {
		pr_err("unable to locate elfcorehdr segment");
		goto out;
	}

	/* Needed in order for the segments to be updated */
	arch_kexec_unprotect_crashkres();

	/* Differentiate between normal load and hotplug update */
	image->hp_action = hp_action;

	/* Now invoke arch-specific update handler */
	arch_crash_handle_hotplug_event(image);

	/* No longer handling a hotplug event */
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_updated = true;

	/* Change back to read-only */
	arch_kexec_protect_crashkres();

	/* Errors in the callback are not a reason to roll back state */
out:
	/* Release locks now that the update is complete */
	kexec_unlock();
	crash_hotplug_unlock();
}
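
/*
 * Memory hotplug notifier: map MEM_ONLINE/MEM_OFFLINE to the
 * corresponding crash hotplug events. There is no CPU associated with
 * a memory event, hence KEXEC_CRASH_HP_INVALID_CPU.
 */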
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	switch (val) {
	case MEM_ONLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY,
			KEXEC_CRASH_HP_INVALID_CPU);
		break;

	case MEM_OFFLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY,
			KEXEC_CRASH_HP_INVALID_CPU);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block crash_memhp_nb = {
	.notifier_call = crash_memhp_notifier,
	.priority = 0
};

static int crash_cpuhp_online(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu);
	return 0;
}

static int crash_cpuhp_offline(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu);
	return 0;
}
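
/*
 * Register for memory and CPU hotplug notifications so the elfcorehdr
 * can track resource changes. The CPUHP_BP_PREPARE_DYN callbacks run on
 * the control CPU, around the hot-plugged CPU's bringup or teardown.
 */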
static int __init crash_hotplug_init(void)
{
	int result = 0;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		register_memory_notifier(&crash_memhp_nb);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		result = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
			"crash/cpuhp", crash_cpuhp_online, crash_cpuhp_offline);
	}

	return result;
}

subsys_initcall(crash_hotplug_init);
#endif /* CONFIG_CRASH_HOTPLUG */