License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[linux-2.6-block.git] / arch/x86/include/asm/kexec.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_PGD 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_TABLE_PAGE 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE 2048

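/*
 * Illustrative sketch, not part of this header: the PA_ and VA_ constants
 * above are indices into the page list that machine_kexec() hands to
 * relocate_kernel(). A 32-bit loader fills it roughly as below
 * (hypothetical local variables, guarded out so nothing is compiled):
 */
#if 0
	unsigned long control_va;	/* VA of the control code page */
	unsigned long page_list[PAGES_NR];

	control_va = (unsigned long)page_address(image->control_code_page);
	page_list[PA_CONTROL_PAGE] = __pa(control_va);
	page_list[VA_CONTROL_PAGE] = control_va;
	page_list[PA_PGD] = __pa(image->arch.pgd);
#endif
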
#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return.
 * I.e. the maximum page that is mapped directly into kernel memory,
 * so that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE 4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
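
/*
 * A minimal sketch, using a hypothetical helper (not in the kernel): the
 * generic kexec code validates each segment's destination against the
 * architecture limits above, along these lines.
 */
static inline int kexec_dest_within_limit(unsigned long addr,
					  unsigned long size)
{
	/* Reject empty and wrapping ranges, then check the last byte. */
	if (!size || addr + size - 1 < addr)
		return 0;
	return addr + size - 1 <= KEXEC_DESTINATION_MEMORY_LIMIT;
}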

/* Memory to back up during crash kdump */
#define KEXEC_BACKUP_SRC_START (0UL)
#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */

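/*
 * Editorial note: this range is the first 640K of conventional memory.
 * The kdump kernel may scribble over it early during boot (e.g. for the
 * real-mode trampoline), so the crash path preserves a copy in a backup
 * region and the vmcore is assembled from that copy.
 */
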
/*
 * The CPU does not save ss and sp on the stack if execution was already
 * in kernel mode when the NMI occurred. This code fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     : "=a"(newregs->ss));
#endif
}
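
/*
 * Editorial note: on 32-bit, a same-privilege exception does not push
 * ss/sp. The interrupted stack pointer is therefore the address where sp
 * would have been saved, which is exactly &oldregs->sp as used above; ss
 * is read straight from the register, since it cannot have changed.
 */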

/*
 * This function captures the register state if we arrived via panic();
 * otherwise it just fixes up ss and sp, since we arrived via a
 * kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = (unsigned long)current_text_addr();
	}
}
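
/*
 * Usage sketch, modelled loosely on the x86 crash path (hypothetical,
 * guarded out): the kdump code snapshots the registers before shutting
 * the machine down, passing NULL when invoked from panic() with no regs.
 */
#if 0
	struct pt_regs fixed_regs;

	crash_setup_regs(&fixed_regs, regs);	/* regs may be NULL */
	/* ... save vmcoreinfo, stop other CPUs, jump to crash kernel ... */
#endif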

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif
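
/*
 * Illustrative sketch (hypothetical, guarded out): on 64-bit,
 * machine_kexec() would invoke the declaration above roughly as follows,
 * with page_list built from the PA_ slots defined at the top of this
 * file.
 */
#if 0
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context,
				       sme_active());
#endif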

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */
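
/*
 * Editorial note: on 64-bit, the p4d/pud/pmd/pte members above hold one
 * freshly allocated page-table page per paging level; machine_kexec uses
 * them to map the control page into the transition page tables.
 */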

#ifdef CONFIG_X86_64
/*
 * The number and order of the members in this structure must match those
 * in arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
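
/*
 * Illustrative sketch (hypothetical values, guarded out): a
 * kexec_file_load()-style loader fills this structure for purgatory
 * roughly as below; the field names are real, the addresses are
 * placeholders.
 */
#if 0
	struct kexec_entry64_regs regs64 = {
		.rbx = 0,			/* per the 64-bit boot protocol */
		.rsi = bootparam_load_addr,	/* struct boot_params */
		.rsp = stack_top,		/* scratch stack for purgatory */
		.rip = kernel_entry_addr,	/* 64-bit kernel entry point */
	};
#endif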

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

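/*
 * A minimal sketch of what these hooks exist for (assumption: matching
 * the SME-era x86 implementation; guarded out): with memory encryption
 * active, the control pages are mapped decrypted so that the target
 * kernel can read them.
 */
#if 0
int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
	return set_memory_decrypted((unsigned long)vaddr, pages);
}

void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
{
	set_memory_encrypted((unsigned long)vaddr, pages);
}
#endif
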
#endif

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);

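/*
 * Editorial note: crash_vmclear_loaded_vmcss is a hook a hypervisor
 * module (KVM's VMX code) can install so that loaded VMCSs are cleared
 * before the kdump kernel takes over; the crash path invokes it, if set,
 * under RCU.
 */
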
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */