/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */
12 | ||
dd5f7260 VG |
13 | #define pr_fmt(fmt) "kexec: " fmt |
14 | ||
5033cba0 EB |
15 | #include <linux/types.h> |
16 | #include <linux/kernel.h> | |
17 | #include <linux/smp.h> | |
5033cba0 EB |
18 | #include <linux/reboot.h> |
19 | #include <linux/kexec.h> | |
5033cba0 EB |
20 | #include <linux/delay.h> |
21 | #include <linux/elf.h> | |
22 | #include <linux/elfcore.h> | |
186f4360 | 23 | #include <linux/export.h> |
dd5f7260 | 24 | #include <linux/slab.h> |
d6472302 | 25 | #include <linux/vmalloc.h> |
5033cba0 EB |
26 | |
27 | #include <asm/processor.h> | |
28 | #include <asm/hardirq.h> | |
29 | #include <asm/nmi.h> | |
30 | #include <asm/hw_irq.h> | |
19842d67 | 31 | #include <asm/apic.h> |
5520b7e7 | 32 | #include <asm/e820/types.h> |
8643e28d | 33 | #include <asm/io_apic.h> |
0c1b2724 | 34 | #include <asm/hpet.h> |
1eeb66a1 | 35 | #include <linux/kdebug.h> |
96b89dc6 | 36 | #include <asm/cpu.h> |
ed23dc6f | 37 | #include <asm/reboot.h> |
2340b62f | 38 | #include <asm/virtext.h> |
da06a43d | 39 | #include <asm/intel_pt.h> |
8e294786 | 40 | |
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

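/* Runs on each remote CPU when it is shot down with an NMI during a crash */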
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

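/* Stop all other CPUs via NMI, saving each CPU's state before the crash kernel runs */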
void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
			sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges in cmem and remove them. This might
 * lead to a split; split ranges are put in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

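/* walk_system_ram_res() callback: record one RAM resource in cmem->ranges[] */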
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
				IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
				phdr->p_paddr == image->arch.backup_src_start &&
				phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

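/* Append one entry to the e820 table handed to the crash kernel via boot_params */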
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
			sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

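/* walk_iomem_res_desc() callback: convert one resource into an e820 entry */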
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

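/*
 * Carve the backup region and the ELF header segment out of the
 * [mstart, mend] range so only the remaining pieces are reported as RAM.
 */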
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
					crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

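/* walk_system_ram_res() callback: remember the first RAM range as the backup source */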
static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

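/*
 * Load the extra segments a file-based crash kernel needs: a zero-filled
 * placeholder for the backup region and the ELF core headers.
 */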
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */