// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004 IBM Corp.
 * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
 * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020 IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
struct umem_info {
        u64 *buf;               /* data buffer for usable-memory property */
        u32 size;               /* size allocated for the data buffer */
        u32 max_entries;        /* maximum no. of entries */
        u32 idx;                /* index of current entry */

        /* usable memory ranges to look up */
        unsigned int nr_ranges;
        const struct range *ranges;
};

const struct kexec_file_ops * const kexec_file_loaders[] = {
/**
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab which should be avoided while
 *                             setting up kexec load segments.
 * @mem_ranges:                Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
        ret = add_tce_mem_ranges(mem_ranges);

        ret = add_initrd_mem_range(mem_ranges);

        ret = add_htab_mem_range(mem_ranges);

        ret = add_kernel_mem_range(mem_ranges);

        ret = add_rtas_mem_range(mem_ranges);

        ret = add_opal_mem_range(mem_ranges);

        ret = add_reserved_mem_ranges(mem_ranges);

        /* exclude memory ranges should be sorted for easy lookup */
        sort_memory_ranges(*mem_ranges, true);

        pr_err("Failed to setup exclude memory ranges\n");
/**
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table,
 *                            that the kdump kernel could use.
 * @mem_ranges:               Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
        /*
         * Early boot failure observed on guests when low memory (first memory
         * block?) is not added to usable memory. So, add [0, crashk_res.end]
         * instead of [crashk_res.start, crashk_res.end] to work around it.
         * Also, the crashed kernel's memory must be added to the reserve map
         * to prevent the kdump kernel from using it.
         */
        ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);

        ret = add_rtas_mem_range(mem_ranges);

        ret = add_opal_mem_range(mem_ranges);

        ret = add_tce_mem_ranges(mem_ranges);

        pr_err("Failed to setup usable memory ranges\n");
/**
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
        phys_addr_t base, end;
        struct crash_mem *tmem;

        for_each_mem_range(i, &base, &end) {
                u64 size = end - base;

                /* Skip backup memory region, which needs a separate entry */
                if (base == BACKUP_SRC_START) {
                        if (size > BACKUP_SRC_SIZE) {
                                base = BACKUP_SRC_END + 1;
                                size -= BACKUP_SRC_SIZE;

                ret = add_mem_range(mem_ranges, base, size);

                /* Try merging adjacent ranges before reallocation attempt */
                if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
                        sort_memory_ranges(*mem_ranges, true);

        /* Reallocate memory ranges if there is no space to split ranges */
        if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
                tmem = realloc_mem_ranges(mem_ranges);

        /* Exclude crashkernel region */
        ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);

        /*
         * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
         *        regions are exported to save their context at the time of
         *        crash, they should actually be backed up just like the
         *        first 64K bytes of memory.
         */
        ret = add_rtas_mem_range(mem_ranges);

        ret = add_opal_mem_range(mem_ranges);

        /* create a separate program header for the backup region */
        ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);

        sort_memory_ranges(*mem_ranges, false);

        pr_err("Failed to setup crash memory ranges\n");
/**
 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
 *                              memory regions that should be added to the
 *                              memory reserve map to ensure the region is
 *                              protected from any mischief.
 * @mem_ranges:                 Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
        ret = add_rtas_mem_range(mem_ranges);

        ret = add_tce_mem_ranges(mem_ranges);

        ret = add_reserved_mem_ranges(mem_ranges);

        pr_err("Failed to setup reserved memory ranges\n");
/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
                                      u64 buf_min, u64 buf_max)
        int ret = -EADDRNOTAVAIL;
        phys_addr_t start, end;

        for_each_mem_range_rev(i, &start, &end) {
                /*
                 * memblock uses [start, end) convention while it is
                 * [start, end] here. Fix the off-by-one to have the
                 * same convention.
                 */

                /* Memory hole not found */

                /* Adjust memory region based on the given range */

                start = ALIGN(start, kbuf->buf_align);
                if (start < end && (end - start + 1) >= kbuf->memsz) {
                        /* Suitable memory range found. Set kbuf->mem */
                        kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
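
/*
 * Illustrative sketch (not part of the original file): the top-down placement
 * above is plain interval arithmetic on an inclusive [start, end] range. The
 * helper below is a hypothetical, self-contained rendering of that step; the
 * EX_* macros and the function name are assumptions for illustration only.
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>

#define EX_ALIGN(x, a)      ((((x) + (a) - 1) / (a)) * (a))
#define EX_ALIGN_DOWN(x, a) (((x) / (a)) * (a))

/* Return the highest aligned base in [start, end] that fits memsz, or 0. */
static uint64_t ex_top_down_fit(uint64_t start, uint64_t end,
                                uint64_t memsz, uint64_t align)
{
        start = EX_ALIGN(start, align);
        if (start < end && (end - start + 1) >= memsz)
                return EX_ALIGN_DOWN(end - memsz + 1, align);
        return 0;
}
#endif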
/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with a top-down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
                                          u64 buf_min, u64 buf_max,
                                          const struct crash_mem *emem)
        int i, ret = 0, err = -EADDRNOTAVAIL;
        u64 start, end, tmin, tmax;

        for (i = (emem->nr_ranges - 1); i >= 0; i--) {
                start = emem->ranges[i].start;
                end = emem->ranges[i].end;

                        tmin = (end < buf_min ? buf_min : end + 1);
                        ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);

                if (tmax < buf_min) {

                ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
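
/*
 * Illustrative sketch (not part of the original file): the loop above walks
 * the sorted exclude list from the highest range down, carving [buf_min,
 * buf_max] into windows that lie between excluded regions and probing each
 * window top down. A hypothetical, self-contained rendering of that walk is
 * below; struct ex_range and the ex_fit() probe (which returns nonzero when
 * it finds a hole in the given window) are assumptions for illustration.
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>

struct ex_range { uint64_t start, end; };       /* inclusive, sorted ascending */

static int ex_search_top_down(const struct ex_range *ex, int nr,
                              uint64_t buf_min, uint64_t buf_max,
                              int (*ex_fit)(uint64_t min, uint64_t max))
{
        uint64_t tmax = buf_max;
        int i;

        for (i = nr - 1; i >= 0; i--) {
                /* exclude range entirely above the current window? */
                if (ex[i].start > tmax)
                        continue;
                /* probe the gap between this range and the window top */
                if (ex[i].end < tmax &&
                    ex_fit(ex[i].end < buf_min ? buf_min : ex[i].end + 1, tmax))
                        return 0;
                /* continue the search below this exclude range */
                if (ex[i].start == 0 || ex[i].start - 1 < buf_min)
                        return -1;
                tmax = ex[i].start - 1;
        }
        /* lowest window, below all exclude ranges */
        return ex_fit(buf_min, tmax) ? 0 : -1;
}
#endif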
/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
                                       u64 buf_min, u64 buf_max)
        int ret = -EADDRNOTAVAIL;
        phys_addr_t start, end;

        for_each_mem_range(i, &start, &end) {
                /*
                 * memblock uses [start, end) convention while it is
                 * [start, end] here. Fix the off-by-one to have the
                 * same convention.
                 */

                /* Memory hole not found */

                /* Adjust memory region based on the given range */

                start = ALIGN(start, kbuf->buf_align);
                if (start < end && (end - start + 1) >= kbuf->memsz) {
                        /* Suitable memory range found. Set kbuf->mem */
/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with a bottom-up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
                                           u64 buf_min, u64 buf_max,
                                           const struct crash_mem *emem)
        int i, ret = 0, err = -EADDRNOTAVAIL;
        u64 start, end, tmin, tmax;

        for (i = 0; i < emem->nr_ranges; i++) {
                start = emem->ranges[i].start;
                end = emem->ranges[i].end;

                        tmax = (start > buf_max ? buf_max : start - 1);
                        ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);

                if (tmin > buf_max) {

                ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * Frees up the old buffer if memory reallocation fails.
 *
 * Returns buffer on success, NULL on error.
 */
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
        if ((um_info->idx + cnt) <= um_info->max_entries)

        new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
        tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
                um_info->size = new_size;
                um_info->max_entries = (um_info->size / sizeof(u64));
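
/*
 * Illustrative sketch (not part of the original file): the buffer grows in
 * fixed MEM_RANGE_CHUNK_SZ steps so repeated single-entry additions do not
 * trigger a reallocation each time. A hypothetical userspace analogue using
 * realloc() is shown below; the names and chunk size are assumptions.
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>
#include <stdlib.h>

#define EX_CHUNK_SZ     2048    /* bytes added per growth step (assumed) */

struct ex_umem {
        uint64_t *buf;
        uint32_t size;          /* bytes allocated */
        uint32_t max_entries;   /* size / sizeof(uint64_t) */
        uint32_t idx;           /* entries used */
};

static uint64_t *ex_ensure_room(struct ex_umem *um, int cnt)
{
        uint64_t *tbuf;

        if (um->idx + cnt <= um->max_entries)
                return um->buf;

        tbuf = realloc(um->buf, um->size + EX_CHUNK_SZ);
        if (tbuf) {
                um->buf = tbuf;
                um->size += EX_CHUNK_SZ;
                um->max_entries = um->size / sizeof(uint64_t);
        }
        return tbuf;
}
#endif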
/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer.
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
        u64 loc_base, loc_end;

        for (i = 0; i < um_info->nr_ranges; i++) {
                loc_base = um_info->ranges[i].start;
                loc_end = um_info->ranges[i].end;
                if (loc_base >= base && loc_end <= end)
                else if (base < loc_end && end > loc_base) {

                        if (!check_realloc_usable_mem(um_info, 2))

                        um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
                        um_info->buf[um_info->idx++] =
                                        cpu_to_be64(loc_end - loc_base + 1);
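
/*
 * Illustrative sketch (not part of the original file): the loop above is an
 * interval intersection. Each usable range is either fully inside [base, end],
 * partially overlapping (and then clipped to the overlap), or disjoint and
 * skipped; every surviving piece is emitted as a (base, size) pair. A
 * hypothetical standalone helper for the clipping step is shown below.
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdbool.h>
#include <stdint.h>

/*
 * Clip the inclusive range [*lo, *hi] to [base, end].
 * Returns false when the two ranges do not overlap at all.
 */
static bool ex_clip_range(uint64_t *lo, uint64_t *hi, uint64_t base, uint64_t end)
{
        if (*lo >= base && *hi <= end)
                return true;                    /* fully contained */
        if (base < *hi && end > *lo) {          /* partial overlap: clip */
                if (*lo < base)
                        *lo = base;
                if (*hi > end)
                        *hi = end;
                return true;
        }
        return false;                           /* disjoint */
}
#endif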
/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
        struct umem_info *um_info;

        /*
         * kdump load isn't supported on kernels already booted with
         * linux,drconf-usable-memory property.
         */
                pr_err("linux,drconf-usable-memory property already exists!\n");

        tmp_idx = um_info->idx;
        if (!check_realloc_usable_mem(um_info, 1))

        base = lmb->base_addr;
        end = base + drmem_lmb_size() - 1;
        ret = add_usable_mem(um_info, base, end);
                /*
                 * Update the no. of ranges added. Two entries (base & size)
                 * for every range added.
                 */
                um_info->buf[tmp_idx] =
                                cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
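
/*
 * Illustrative sketch (not part of the original file): one u64 slot is
 * reserved up front at tmp_idx, add_usable_mem() then appends (base, size)
 * pairs, and the reserved slot is finally written with the number of pairs,
 * (idx - tmp_idx - 1) / 2. A hypothetical standalone rendering:
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>

/* Layout produced for one LMB: [count][base0][size0][base1][size1]... */
static void ex_finish_lmb_entry(uint64_t *buf, uint32_t tmp_idx, uint32_t idx)
{
        /*
         * Entries written after the count slot come two per usable range.
         * E.g. tmp_idx = 0 and idx = 5 means slots 1..4 hold two ranges,
         * so buf[0] = 2.
         */
        buf[tmp_idx] = (idx - tmp_idx - 1) / 2;
}
#endif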
#define NODE_PATH_LEN 256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
                                   struct umem_info *um_info)
        int n_mem_addr_cells, n_mem_size_cells, node;
        char path[NODE_PATH_LEN];
        int i, len, ranges, ret;

        if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
                pr_err("Buffer (%d) too small for memory node: %pOF\n",
        pr_debug("Memory node path: %s\n", path);

        /* Now that we know the path, find its offset in kdump kernel's fdt */
        node = fdt_path_offset(fdt, path);
                pr_err("Malformed device tree: error reading %s\n", path);

        /* Get the address & size cells */
        n_mem_addr_cells = of_n_addr_cells(dn);
        n_mem_size_cells = of_n_size_cells(dn);
        pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,

        if (!check_realloc_usable_mem(um_info, 2)) {

        prop = of_get_property(dn, "reg", &len);
        if (!prop || len <= 0) {

        /*
         * The "reg" property represents a sequence of (addr, size) tuples,
         * each representing a memory range.
         */
        ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

        for (i = 0; i < ranges; i++) {
                base = of_read_number(prop, n_mem_addr_cells);
                prop += n_mem_addr_cells;
                end = base + of_read_number(prop, n_mem_size_cells) - 1;
                prop += n_mem_size_cells;

                ret = add_usable_mem(um_info, base, end);

        /*
         * No kdump kernel usable memory found in this memory node.
         * Write a (0, 0) tuple in the linux,usable-memory property so
         * that this region is ignored.
         */
        if (um_info->idx == 0) {

        ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
                          (um_info->idx * sizeof(u64)));
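
/*
 * Illustrative sketch (not part of the original file): "reg" is an array of
 * big-endian 32-bit cells, so with len in bytes there are len / 4 cells and
 * (len / 4) / (addr_cells + size_cells) (addr, size) tuples. A hypothetical
 * standalone parser of such a property is sketched below; the byte-wise
 * big-endian reader stands in for of_read_number().
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>

static uint64_t ex_read_cells(const uint8_t *p, int cells)
{
        uint64_t val = 0;
        int i;

        for (i = 0; i < cells * 4; i++)
                val = (val << 8) | p[i];        /* big-endian byte order */
        return val;
}

/* Walk the (addr, size) tuples of a "reg"-style property. */
static void ex_parse_reg(const uint8_t *prop, int len,
                         int addr_cells, int size_cells)
{
        int i, ranges = (len >> 2) / (addr_cells + size_cells);

        for (i = 0; i < ranges; i++) {
                uint64_t base = ex_read_cells(prop, addr_cells);
                uint64_t size = ex_read_cells(prop + addr_cells * 4, size_cells);
                uint64_t end = base + size - 1; /* inclusive end, as above */

                prop += (addr_cells + size_cells) * 4;
                (void)base; (void)end;          /* would feed add_usable_mem() */
        }
}
#endif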
/**
 * update_usable_mem_fdt - Update the kdump kernel's fdt with the
 *                         linux,usable-memory and linux,drconf-usable-memory
 *                         DT properties as appropriate to restrict its
 *                         memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
        struct umem_info um_info;
        struct device_node *dn;

                pr_err("Usable memory ranges for kdump kernel not found\n");

        node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
        if (node == -FDT_ERR_NOTFOUND)
                pr_debug("No dynamic reconfiguration memory found\n");
                pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");

        um_info.max_entries = 0;

        /* Memory ranges to look up */
        um_info.ranges = &(usable_mem->ranges[0]);
        um_info.nr_ranges = usable_mem->nr_ranges;

        dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
                ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);

                        pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");

                ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
                                  um_info.buf, (um_info.idx * sizeof(u64)));
                        pr_err("Failed to update fdt with linux,drconf-usable-memory property\n");

        /*
         * Walk through each memory node and set the linux,usable-memory
         * property for the corresponding node in the kdump kernel's fdt.
         */
        for_each_node_by_type(dn, "memory") {
                ret = add_usable_mem_property(fdt, dn, &um_info);
                        pr_err("Failed to set linux,usable-memory property for %s node\n",
/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
        /*
         * Setup a source buffer for the backup segment.
         *
         * A source buffer has no meaning for the backup region, as its data
         * will be copied from the backup source, after crash, in the
         * purgatory. But as the load segment code doesn't recognize such
         * segments, set up a dummy source buffer to keep it happy for now.
         */
        buf = vzalloc(BACKUP_SRC_SIZE);

        kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
        kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
        kbuf->top_down = false;

        ret = kexec_add_buffer(kbuf);

        image->arch.backup_buf = buf;
        image->arch.backup_start = kbuf->mem;
/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is set up for the backup region.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
        phdr = (Elf64_Phdr *)(ehdr + 1);
        for (i = 0; i < ehdr->e_phnum; i++) {
                if (phdr->p_paddr == BACKUP_SRC_START) {
                        phdr->p_offset = image->arch.backup_start;
                        pr_debug("Backup region offset updated to 0x%lx\n",
                                 image->arch.backup_start);
/**
 * load_elfcorehdr_segment - Set up crash memory ranges and initialize the
 *                           elfcorehdr segment needed to load the kdump
 *                           kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
        struct crash_mem *cmem = NULL;
        unsigned long headers_sz;
        void *headers = NULL;

        ret = get_crash_memory_ranges(&cmem);

        /* Setup elfcorehdr segment */
        ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
                pr_err("Failed to prepare elf headers for the core\n");

        /* Fix the offset for backup region in the ELF header */
        update_backup_region_phdr(image, headers);

        kbuf->buffer = headers;
        kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
        kbuf->bufsz = kbuf->memsz = headers_sz;
        kbuf->top_down = false;

        ret = kexec_add_buffer(kbuf);

        image->elf_load_addr = kbuf->mem;
        image->elf_headers_sz = headers_sz;
        image->elf_headers = headers;
/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load the kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
                                  struct kexec_buf *kbuf)
        /* Load backup segment - first 64K bytes of the crashing kernel */
        ret = load_backup_segment(image, kbuf);
                pr_err("Failed to load backup segment\n");
        pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

        /* Load elfcorehdr segment - to export crashing kernel's vmcore */
        ret = load_elfcorehdr_segment(image, kbuf);
                pr_err("Failed to load elfcorehdr segment\n");
        pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
                 image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
                          const void *fdt, unsigned long kernel_load_addr,
                          unsigned long fdt_load_addr)
        struct device_node *dn = NULL;

        ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,

        if (image->type == KEXEC_TYPE_CRASH) {
                u32 my_run_at_load = 1;

                /*
                 * Tell the relocatable kernel to run at the load address,
                 * via the word meant for that at 0x5c.
                 */
                ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
                                                     sizeof(my_run_at_load),

                /* Tell purgatory where to look for the backup region */
                ret = kexec_purgatory_get_set_symbol(image, "backup_start",
                                                     &image->arch.backup_start,
                                                     sizeof(image->arch.backup_start),

        /* Setup OPAL base & entry values */
        dn = of_find_node_by_path("/ibm,opal");

                of_property_read_u64(dn, "opal-base-address", &val);
                ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,

                of_property_read_u64(dn, "opal-entry-address", &val);
                ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,

                pr_err("Failed to setup purgatory symbols\n");
/**
 * cpu_node_size - Compute the size of a CPU node in the FDT.
 *                 This should be done only once and the value is stored in
 *                 a static variable.
 *
 * Returns the max size of a CPU node in the FDT.
 */
static unsigned int cpu_node_size(void)
        static unsigned int size;
        struct device_node *dn;

        /*
         * Don't compute it twice, we are assuming that the per-CPU node size
         * doesn't change during the system's life.
         */

        dn = of_find_node_by_type(NULL, "cpu");
        if (WARN_ON_ONCE(!dn)) {
                // Unlikely to happen

        /*
         * We compute the sub-node size for a CPU node, assuming it
         * will be the same for all.
         */
        size += strlen(dn->name) + 5;
        for_each_property_of_node(dn, pp) {
                size += strlen(pp->name);
/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              set up the FDT for the kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
        unsigned int cpu_nodes, extra_size;
        struct device_node *dn;

        if (image->type != KEXEC_TYPE_CRASH)

        /*
         * For the kdump kernel, account for the linux,usable-memory and
         * linux,drconf-usable-memory properties. Get an approximation of the
         * number of usable memory entries and use it for FDT size estimation.
         */
        usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
                       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));

        extra_size = (unsigned int)(usm_entries * sizeof(u64));

        /*
         * Get the number of CPU nodes in the current DT. This allows us to
         * reserve space for CPU nodes added since boot.
         */
        for_each_node_by_type(dn, "cpu") {

        if (cpu_nodes > boot_cpu_node_count)
                extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
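
/*
 * Illustrative sketch (not part of the original file): the usable-memory
 * estimate above is (DRAM size / LMB size) entries for the per-LMB count
 * slots plus 2 * (crashkernel size / LMB size) entries for the (base, size)
 * pairs the crashkernel region can contribute. A hypothetical worked example
 * with assumed sizes:
 */
#if 0   /* example only, never compiled into the kernel */
#include <stdint.h>

static uint64_t ex_estimate_usm_entries(uint64_t dram_size, uint64_t crashk_size,
                                        uint64_t lmb_size)
{
        /*
         * E.g. 64 GiB of DRAM, a 1 GiB crashkernel region and 256 MiB LMBs:
         * 256 + 2 * 4 = 264 u64 entries, i.e. 2112 bytes of extra FDT space.
         */
        return (dram_size / lmb_size) + 2 * (crashk_size / lmb_size);
}
#endif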
/**
 * add_node_props - Read node properties from a device node structure and add
 *                  them to the fdt.
 * @fdt:            Flattened device tree of the kernel
 * @node_offset:    offset of the node to add a property at
 * @dn:             device node pointer
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
        struct property *pp;

        for_each_property_of_node(dn, pp) {
                ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
                        pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));

/**
 * update_cpus_node - Update the cpus node of the flattened device tree using
 *                    the of_root device node.
 * @fdt:              Flattened device tree of the kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_cpus_node(void *fdt)
        struct device_node *cpus_node, *dn;
        int cpus_offset, cpus_subnode_offset, ret = 0;

        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
                pr_err("Malformed device tree: error reading /cpus node: %s\n",
                       fdt_strerror(cpus_offset));

        if (cpus_offset > 0) {
                ret = fdt_del_node(fdt, cpus_offset);
                        pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));

        /* Add cpus node to fdt */
        cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
        if (cpus_offset < 0) {
                pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));

        /* Add cpus node properties */
        cpus_node = of_find_node_by_path("/cpus");
        ret = add_node_props(fdt, cpus_offset, cpus_node);
        of_node_put(cpus_node);

        /* Loop through all subnodes of cpus and add them to fdt */
        for_each_node_by_type(dn, "cpu") {
                cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
                if (cpus_subnode_offset < 0) {
                        pr_err("Unable to add %s subnode: %s\n", dn->full_name,
                               fdt_strerror(cpus_subnode_offset));
                        ret = cpus_subnode_offset;

                ret = add_node_props(fdt, cpus_subnode_offset, dn);
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
                         const char *propname)
{
        const void *prop, *fdtprop;
        int len = 0, fdtlen = 0;

        prop = of_get_property(dn, propname, &len);
        fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

        if (fdtprop && !prop)
                return fdt_delprop(fdt, node_offset, propname);
        else if (prop)
                return fdt_setprop(fdt, node_offset, propname, prop, len);
        else
                return -FDT_ERR_NOTFOUND;
}
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
        struct device_node *dn;
        int pci_offset, root_offset, ret = 0;

        if (!firmware_has_feature(FW_FEATURE_LPAR))

        root_offset = fdt_path_offset(fdt, "/");
        for_each_node_with_property(dn, dmapropname) {
                pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));

                ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");

                ret = copy_property(fdt, pci_offset, dn, dmapropname);
/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there
 *                       will be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
                        unsigned long initrd_load_addr,
                        unsigned long initrd_len, const char *cmdline)
        struct crash_mem *umem = NULL, *rmem = NULL;
        int i, nr_ranges, ret;

        /*
         * Restrict memory usage for the kdump kernel by setting up
         * usable memory ranges and the memory reserve map.
         */
        if (image->type == KEXEC_TYPE_CRASH) {
                ret = get_usable_memory_ranges(&umem);

                ret = update_usable_mem_fdt(fdt, umem);
                        pr_err("Error setting up usable-memory property for kdump kernel\n");

                /*
                 * Ensure we don't touch crashed kernel's memory except the
                 * first 64K of RAM, which will be backed up.
                 */
                ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
                                      crashk_res.start - BACKUP_SRC_SIZE);
                        pr_err("Error reserving crash memory: %s\n",

                /* Ensure backup region is not used by kdump/capture kernel */
                ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
                        pr_err("Error reserving memory for backup: %s\n",

        /* Update cpus node information to account for hotplug CPUs. */
        ret = update_cpus_node(fdt);

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
        ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);

        ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);

#undef DMA64_PROPNAME
#undef DIRECT64_PROPNAME

        /* Update memory reserve map */
        ret = get_reserved_memory_ranges(&rmem);

        nr_ranges = rmem ? rmem->nr_ranges : 0;
        for (i = 0; i < nr_ranges; i++) {
                base = rmem->ranges[i].start;
                size = rmem->ranges[i].end - base + 1;
                ret = fdt_add_mem_rsv(fdt, base, size);
                        pr_err("Error updating memory reserve map: %s\n",
/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
        struct crash_mem **emem;
        u64 buf_min, buf_max;

        /* Look up the exclude ranges list while locating the memory hole */
        emem = &(kbuf->image->arch.exclude_ranges);
        if (!(*emem) || ((*emem)->nr_ranges == 0)) {
                pr_warn("No exclude range list. Using the default locate mem hole method\n");
                return kexec_locate_mem_hole(kbuf);

        buf_min = kbuf->buf_min;
        buf_max = kbuf->buf_max;
        /* Segments for the kdump kernel should be within the crashkernel region */
        if (kbuf->image->type == KEXEC_TYPE_CRASH) {
                buf_min = (buf_min < crashk_res.start ?
                           crashk_res.start : buf_min);
                buf_max = (buf_max > crashk_res.end ?
                           crashk_res.end : buf_max);

        if (buf_min > buf_max) {
                pr_err("Invalid buffer min and/or max values\n");

                ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,

                ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,

        /* Add the allocated buffer to the exclude list for the next lookup */
                add_mem_range(emem, kbuf->mem, kbuf->memsz);
                sort_memory_ranges(*emem, true);

                pr_err("Failed to locate memory buffer of size %lu\n",
/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to set up
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
                                  unsigned long buf_len)
        /* Get exclude memory ranges needed for setting up kexec segments */
        ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
                pr_err("Failed to setup exclude memory ranges for buffer lookup\n");

        return kexec_image_probe_default(image, buf, buf_len);
/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
        kfree(image->arch.exclude_ranges);
        image->arch.exclude_ranges = NULL;

        vfree(image->arch.backup_buf);
        image->arch.backup_buf = NULL;

        vfree(image->elf_headers);
        image->elf_headers = NULL;
        image->elf_headers_sz = 0;

        kvfree(image->arch.fdt);
        image->arch.fdt = NULL;

        return kexec_image_post_load_cleanup_default(image);