/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 * Copyright (C) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
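
/*
 * turn_off_mmu - disable the MMU at EL1.
 *
 * Writes INIT_SCTLR_EL1_MMU_OFF to SCTLR_EL1, then resynchronizes the
 * pipeline with an ISB. pre_disable_mmu_workaround (asm/assembler.h) is
 * for CPUs that need extra care immediately before the MMU goes off.
 */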
.macro turn_off_mmu tmp1, tmp2
	mov_q	\tmp1, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, \tmp1
	isb
.endm

.section    ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * safe memory that has been set up to be preserved during the copy operation.
 */
SYM_CODE_START(arm64_relocate_new_kernel)
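	/*
	 * Entered with x0 = address of the struct kimage, as set up by
	 * machine_kexec(); the KIMAGE_* field offsets below come from
	 * asm-offsets. This routine never returns, so all other registers
	 * are used freely as scratch.
	 */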
	/* Setup the list loop variables. */
	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE]	/* x18 = zero page for BBM */
	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]		/* x17 = linear map copy */
	ldr	x16, [x0, #KIMAGE_HEAD]			/* x16 = kimage_head */
	ldr	x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET]	/* x22 = phys_offset */
	raw_dcache_line_size x15, x1			/* x15 = dcache line size */
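	/*
	 * Switch TTBR1 to a copy of the linear map via break-before-make:
	 * point TTBR1 at the reserved zero page first so no stale
	 * translations can survive, then install the copied tables. The
	 * copy loop below may overwrite the old kernel, including its page
	 * tables, so the walk must not depend on them.
	 */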
	break_before_make_ttbr_switch	x18, x17, x1, x2 /* set linear map */
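	/*
	 * The loop below walks the kimage entry list. Roughly, in C (a
	 * sketch; cf. the kimage_entry handling in kernel/kexec_core.c):
	 *
	 *	while (!(entry & IND_DONE)) {
	 *		addr = phys_to_virt(entry & PAGE_MASK);
	 *		if (entry & IND_SOURCE)
	 *			copy_page(dest++, addr);
	 *		else if (entry & IND_INDIRECTION)
	 *			ptr = addr;
	 *		else if (entry & IND_DESTINATION)
	 *			dest = addr;
	 *		entry = *ptr++;
	 *	}
	 */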
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */
	sub	x12, x12, x22			/* Convert x12 to virt */
	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x19, x13			/* save dest; copy_page advances x13 */
	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
	add	x1, x19, #PAGE_SIZE
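	/*
	 * [x19, x1) bounds the page just written; clean+invalidate it by
	 * VA to the PoC (civac), one D-cache line (x15) at a time, so the
	 * new image is visible even once the MMU and caches are off.
	 */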
	dcache_by_myline_op civac, sy, x19, x1, x15, x20
	b	.Lnext
.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
	mov	x14, x12			/* ptr = addr */
	b	.Lnext
.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext
	mov	x13, x12			/* dest = addr */
.Lnext:
	ldr	x16, [x14], #8			/* entry = *ptr++ */
	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
	/* wait for writes from copy_page to finish */
	dsb	nsh				/* complete the copies and cache maintenance */
	ic	iallu				/* discard stale instruction cache contents */
	dsb	nsh				/* complete the I-cache invalidate */
	isb					/* resynchronize the pipeline */
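	/*
	 * Read the boot arguments while x0 still points at the kimage;
	 * the last load below replaces x0 with the dtb address.
	 */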
	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* el2 vectors */
	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
	turn_off_mmu x12, x13
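
	/*
	 * If the image provides EL2 vectors (x1 != 0), restart through
	 * HVC_SOFT_RESTART (hyp-stub convention: x1 = entry point, x2-x4
	 * arrive as x0-x2 at the target), so the new kernel gets x0 = dtb
	 * at EL2. Otherwise branch to it directly at EL1 with x0 = dtb
	 * and x1-x3 zeroed, per the arm64 boot protocol.
	 */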
	/* Start new image. */
	cbz	x1, .Lel1
	mov	x1, x4				/* relocation start */
	mov	x2, x0				/* dtb address */
	mov	x3, xzr
	mov	x4, xzr
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				/* Jumps from el2 */
.Lel1:
	/* x1 is already zero here, or the cbz above would not have been taken */
	mov	x2, xzr
	mov	x3, xzr
	br	x4				/* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)