arch/x86/kernel/crash.c (linux-2.6-block.git)
x86: kexec_file: remove X86_64 dependency from prepare_elf64_headers()

/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * Used to VMCLEAR all the VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

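/*
 * Run the VMCLEAR callback registered by kvm_intel (if any) on this CPU.
 * Safe to call even when KVM is not loaded; it is a no-op in that case.
 */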
static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

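/*
 * Invoked by nmi_shootdown_cpus() in NMI context on every CPU except the
 * crashing one: save that CPU's registers, tear down virtualization and
 * the local APIC so the kdump kernel can boot cleanly.
 */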
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

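/*
 * Remove the portion [mstart, mend] from every range in @mem. For example,
 * excluding [0x2000, 0x2fff] from a single range [0x1000, 0x4fff] leaves
 * [0x1000, 0x1fff] and [0x3000, 0x4fff], i.e. one more range than before;
 * callers account for such splits when sizing max_nr_ranges. Returns
 * -ENOMEM if a split no longer fits in CRASH_MAX_RANGES entries.
 */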
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If no split happened, we are done */
	if (!temp_range.end)
		return 0;

	/* Split happened, add the new range to the array */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

/*
 * Remove the memory ranges that must not appear in the crash dump (the
 * crashkernel reservations themselves). Exclusion may split ranges; the
 * results are kept in ced->mem.ranges[].
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_elf_data *ced = arg;
	struct crash_mem *cmem = &ced->mem;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

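/*
 * Build the ELF64 core headers in a vzalloc()'d buffer: one PT_NOTE phdr per
 * present CPU for the crash notes, one PT_NOTE for vmcoreinfo, an optional
 * PT_LOAD for the kernel text mapping (when @kernel_map is set) and one
 * PT_LOAD per RAM range in ced->mem. On success, *addr and *sz describe the
 * buffer.
 */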
static int prepare_elf64_headers(struct crash_elf_data *ced, bool kernel_map,
				 void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	struct crash_mem *cmem = &ced->mem;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		(ehdr->e_phnum)++;
	}

	/* Go through all the ranges in cmem->ranges[] and prepare phdr */
	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced);
	if (ret)
		goto out;

	/*
	 * By default prepare 64bit headers. The kernel text mapping is
	 * only added on x86_64.
	 */
	ret = prepare_elf64_headers(ced, IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	kfree(ced);
	return ret;
}

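/* Append one entry to the e820 table in the given boot_params, if there is room. */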
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

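/*
 * Seed cmem with the single range [mstart, mend] and carve out the backup
 * and ELF header segments, leaving only the ranges that can be handed to
 * the kdump kernel as usable RAM.
 */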
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

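/*
 * The e820 map handed to the kdump kernel is built here: the backed-up
 * first 640K, ACPI table and NVS regions, the optional low crashkernel
 * range, and the crashkernel range minus the backup and ELF header
 * segments.
 */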
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

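/*
 * Load the two extra segments a crash kernel loaded via kexec_file_load()
 * needs: a zero-filled placeholder for the backup of the first 640K (the
 * real contents are copied by purgatory at crash time) and the ELF core
 * headers built by prepare_elf_headers().
 */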
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */