/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory, so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/*
 * This function is responsible for capturing the register state if we come
 * in via panic; otherwise it just fixes up ss and sp if we come in via a
 * kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
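/*
 * Illustrative sketch (not part of this header), assuming the usual crash
 * path: callers pass in the exception registers when they have them, or
 * NULL when coming from a plain panic, roughly:
 *
 *	struct pt_regs fixed_regs;
 *
 *	// regs may be NULL when we get here from panic()
 *	crash_setup_regs(&fixed_regs, regs);
 *	machine_crash_shutdown(&fixed_regs);
 */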

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int host_mem_enc_active);
#endif
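/*
 * Illustrative sketch (not part of this header), assuming the usual
 * machine_kexec() flow on 64-bit: the arch code copies relocate_kernel()
 * into the control page and then invokes it, roughly:
 *
 *	void *control_page = page_address(image->control_code_page) + PAGE_SIZE;
 *
 *	__memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context,
 *				       host_mem_enc_active);
 */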

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure should match those in
 * arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
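/*
 * Illustrative sketch (not part of this header), assuming a kexec_file_load()
 * style loader: the loader fills this structure in and pushes it into the
 * purgatory blob by symbol name; the addresses below are hypothetical:
 *
 *	struct kexec_entry64_regs regs64 = {};
 *
 *	regs64.rbx = 0;
 *	regs64.rsi = bootparam_load_addr;	// hypothetical
 *	regs64.rip = kernel_load_addr + 0x200;	// hypothetical 64-bit entry
 *	kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *				       sizeof(regs64), 0);
 */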

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif

extern void kdump_nmi_shootdown_cpus(void);

#ifdef CONFIG_CRASH_HOTPLUG
void arch_crash_handle_hotplug_event(struct kimage *image, void *arg);
#define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event

int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags);
#define arch_crash_hotplug_support arch_crash_hotplug_support

unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */