Commit | Line | Data |
---|---|---|
40b0b3f8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
dc009d92 | 2 | /* |
2965faa5 | 3 | * kexec.c - kexec_load system call |
dc009d92 | 4 | * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com> |
dc009d92 EB |
5 | */ |
6 | ||
de90a6bc MH |
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
8 | ||
c59ede7b | 9 | #include <linux/capability.h> |
dc009d92 EB |
10 | #include <linux/mm.h> |
11 | #include <linux/file.h> | |
a210fd32 | 12 | #include <linux/security.h> |
dc009d92 | 13 | #include <linux/kexec.h> |
8c5a1cf0 | 14 | #include <linux/mutex.h> |
dc009d92 | 15 | #include <linux/list.h> |
dc009d92 | 16 | #include <linux/syscalls.h> |
a43cac0d | 17 | #include <linux/vmalloc.h> |
2965faa5 | 18 | #include <linux/slab.h> |
dc009d92 | 19 | |
a43cac0d DY |
20 | #include "kexec_internal.h" |
21 | ||
255aedd9 VG |
/*
 * kimage_alloc_init - allocate and initialize a kimage for a kexec load
 * @rimage:      out parameter; on success *rimage points at the new kimage
 * @entry:       entry point of the new kernel (physical address)
 * @nr_segments: number of entries in @segments (caller has already capped
 *               this at KEXEC_SEGMENT_MAX via kexec_load_check())
 * @segments:    kernel-space copy of the user-supplied segment list
 * @flags:       KEXEC_* flags; KEXEC_ON_CRASH selects the crash-kernel
 *               allocation policy
 *
 * Returns 0 on success with *rimage set, or a negative errno on failure
 * (in which case *rimage is left untouched).
 */
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

#ifdef CONFIG_CRASH_DUMP
	if (kexec_on_panic) {
		/*
		 * Verify we have a valid entry point: a crash kernel must
		 * start inside the reserved crashkernel region.
		 */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}
#endif

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;
	image->nr_segments = nr_segments;
	memcpy(image->segment, segments, nr_segments * sizeof(*segments));

#ifdef CONFIG_CRASH_DUMP
	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}
#endif

	/* Must run after image->type is set: crash images are checked
	 * against the crashkernel region. */
	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	/* The swap page is only needed for the non-crash (reboot) path. */
	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
90 | ||
/*
 * do_kexec_load - common worker for the kexec_load syscall paths
 * @entry:       entry point of the new kernel
 * @nr_segments: number of entries in @segments; 0 means "unload"
 * @segments:    kernel-space segment list (already copied from userspace)
 * @flags:       KEXEC_* flags
 *
 * Loads (or, for nr_segments == 0, unloads) a kexec image into either
 * kexec_image or kexec_crash_image depending on KEXEC_ON_CRASH.
 * Returns 0 on success or a negative errno.
 */
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	/*
	 * Because we write directly to the reserved memory region when loading
	 * crash kernels we need a serialization here to prevent multiple crash
	 * kernels from attempting to load simultaneously.
	 */
	if (!kexec_trylock())
		return -EBUSY;

#ifdef CONFIG_CRASH_DUMP
	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		/* Drop arch write protection so the region can be rewritten. */
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else
#endif
		dest_image = &kexec_image;

	if (nr_segments == 0) {
		/* Uninstall image: atomically detach, then free it. */
		kimage_free(xchg(dest_image, NULL));
		ret = 0;
		goto out_unlock;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		goto out_unlock;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

#ifdef CONFIG_CRASH_HOTPLUG
	if (flags & KEXEC_UPDATE_ELFCOREHDR)
		image->update_elfcorehdr = 1;
#endif

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architecture(like S390) may touch the crash memory before
	 * machine_kexec_prepare(), we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/*
	 * Install the new kernel and uninstall the old.  After the xchg,
	 * @image holds the previously installed image (possibly NULL), so
	 * the kimage_free() below releases the old image on success and
	 * the failed new image on the error paths that jump to "out".
	 */
	image = xchg(dest_image, image);

out:
#ifdef CONFIG_CRASH_DUMP
	/* Re-protect the crash region whenever a crash kernel is loaded. */
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();
#endif

	kimage_free(image);
out_unlock:
	kexec_unlock();
	return ret;
}
180 | ||
dc009d92 EB |
181 | /* |
182 | * Exec Kernel system call: for obvious reasons only root may call it. | |
183 | * | |
184 | * This call breaks up into three pieces. | |
185 | * - A generic part which loads the new kernel from the current | |
186 | * address space, and very carefully places the data in the | |
187 | * allocated pages. | |
188 | * | |
189 | * - A generic part that interacts with the kernel and tells all of | |
190 | * the devices to shut down. Preventing on-going dmas, and placing | |
191 | * the devices in a consistent state so a later kernel can | |
192 | * reinitialize them. | |
193 | * | |
194 | * - A machine specific part that includes the syscall number | |
002ace78 | 195 | * and then copies the image to its final destination. And |
dc009d92 EB |
196 | * jumps into the image at entry. |
197 | * | |
198 | * kexec does not sync, or unmount filesystems so if you need | |
199 | * that to happen you need to do that yourself. | |
200 | */ | |
8c5a1cf0 | 201 | |
6b27aef0 DB |
202 | static inline int kexec_load_check(unsigned long nr_segments, |
203 | unsigned long flags) | |
dc009d92 | 204 | { |
a42aaad2 RR |
205 | int image_type = (flags & KEXEC_ON_CRASH) ? |
206 | KEXEC_TYPE_CRASH : KEXEC_TYPE_DEFAULT; | |
a210fd32 MZ |
207 | int result; |
208 | ||
dc009d92 | 209 | /* We only trust the superuser with rebooting the system. */ |
a42aaad2 | 210 | if (!kexec_load_permitted(image_type)) |
dc009d92 EB |
211 | return -EPERM; |
212 | ||
a210fd32 | 213 | /* Permit LSMs and IMA to fail the kexec */ |
b64fcae7 | 214 | result = security_kernel_load_data(LOADING_KEXEC_IMAGE, false); |
a210fd32 MZ |
215 | if (result < 0) |
216 | return result; | |
217 | ||
7d31f460 MG |
218 | /* |
219 | * kexec can be used to circumvent module loading restrictions, so | |
220 | * prevent loading in that case | |
221 | */ | |
222 | result = security_locked_down(LOCKDOWN_KEXEC); | |
223 | if (result) | |
224 | return result; | |
225 | ||
dc009d92 EB |
226 | /* |
227 | * Verify we have a legal set of flags | |
228 | * This leaves us room for future extensions. | |
229 | */ | |
230 | if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK)) | |
231 | return -EINVAL; | |
232 | ||
dc009d92 EB |
233 | /* Put an artificial cap on the number |
234 | * of segments passed to kexec_load. | |
235 | */ | |
236 | if (nr_segments > KEXEC_SEGMENT_MAX) | |
237 | return -EINVAL; | |
238 | ||
6b27aef0 DB |
239 | return 0; |
240 | } | |
241 | ||
242 | SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, | |
243 | struct kexec_segment __user *, segments, unsigned long, flags) | |
244 | { | |
5d700a0f AB |
245 | struct kexec_segment *ksegments; |
246 | unsigned long result; | |
6b27aef0 DB |
247 | |
248 | result = kexec_load_check(nr_segments, flags); | |
249 | if (result) | |
250 | return result; | |
251 | ||
252 | /* Verify we are on the appropriate architecture */ | |
253 | if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && | |
254 | ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) | |
255 | return -EINVAL; | |
256 | ||
569c8d82 | 257 | ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0])); |
5d700a0f AB |
258 | if (IS_ERR(ksegments)) |
259 | return PTR_ERR(ksegments); | |
260 | ||
261 | result = do_kexec_load(entry, nr_segments, ksegments, flags); | |
262 | kfree(ksegments); | |
dc009d92 | 263 | |
dc009d92 EB |
264 | return result; |
265 | } | |
266 | ||
#ifdef CONFIG_COMPAT
/*
 * Compat kexec_load: widen each 32-bit compat_kexec_segment into a native
 * kexec_segment, then defer to do_kexec_load().  Returns 0 on success,
 * negative errno on failure.
 */
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]),
			GFP_KERNEL);
	if (!ksegments)
		return -ENOMEM;

	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result) {
			/*
			 * copy_from_user() returns the (positive) number of
			 * bytes left uncopied; don't leak that to userspace
			 * as a "successful-looking" syscall return value.
			 */
			result = -EFAULT;
			goto fail;
		}

		/* Widen the compat segment descriptor to the native layout. */
		ksegments[i].buf = compat_ptr(in.buf);
		ksegments[i].bufsz = in.bufsz;
		ksegments[i].mem = in.mem;
		ksegments[i].memsz = in.memsz;
	}

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

fail:
	kfree(ksegments);
	return result;
}
#endif