kexec: call do_kexec_load() in compat syscall directly
linux-2.6-block.git: kernel/kexec.c
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "kexec_internal.h"

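/*
 * Copy the user-supplied array of struct kexec_segment into the
 * freshly allocated kimage. Any fault while copying is reported to
 * the caller as -EFAULT.
 */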
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

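/*
 * Allocate a kimage for the given entry point and user-supplied
 * segment list, and run the generic sanity checks on it. On success
 * *rimage points at the initialized image; on failure the partially
 * constructed image is freed before returning.
 */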
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

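/*
 * Core of the kexec_load(2) syscall, shared by the native and compat
 * entry points. Called with kexec_mutex held; replaces the currently
 * installed image (kexec_image or kexec_crash_image, depending on
 * KEXEC_ON_CRASH) with a newly loaded one, or uninstalls it when
 * nr_segments is zero.
 */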
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes. Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(); we must copy the vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need that
 * to happen you need to do it yourself.
 */

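/*
 * Permission and flag validation shared by the native and compat
 * syscall entry points: only CAP_SYS_BOOT may load (and only if
 * kexec_load_disabled is not set), the flags must be a subset of
 * KEXEC_FLAGS outside the architecture mask, and the segment count
 * is capped at KEXEC_SEGMENT_MAX.
 */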
static inline int kexec_load_check(unsigned long nr_segments,
				   unsigned long flags)
{
	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/*
	 * Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	return 0;
}

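/*
 * Native kexec_load(2) entry point: validate the caller and flags,
 * check that we are on the right architecture, then take kexec_mutex
 * and hand the real work to do_kexec_load().
 */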
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/*
	 * Because we write directly to the reserved memory region when
	 * loading crash kernels we need a mutex here to prevent multiple
	 * crash kernels from attempting to load simultaneously, and to
	 * prevent a crash kernel from loading over the top of an in-use
	 * crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}

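/*
 * For illustration only (not part of this file): a minimal userspace
 * sketch of invoking kexec_load(2) through the raw syscall interface,
 * since glibc provides no wrapper. image_buf, image_len, dest and
 * entry are placeholders; a real loader (e.g. kexec-tools) builds the
 * segment list from the kernel image and boot parameters, with mem
 * and memsz page-aligned as sanity_check_segment_list() requires.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/kexec.h>
 *
 *	struct kexec_segment seg = {
 *		.buf	= image_buf,	// source buffer in our address space
 *		.bufsz	= image_len,	// bytes to copy from buf
 *		.mem	= (void *)dest,	// physical destination address
 *		.memsz	= image_len,	// destination size (zero padded)
 *	};
 *	long ret = syscall(SYS_kexec_load, entry, 1, &seg,
 *			   KEXEC_ARCH_DEFAULT);
 *
 * A nonzero return with errno set to EPERM, EINVAL or EBUSY maps to
 * the error paths above.
 */
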
#ifdef CONFIG_COMPAT
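/*
 * 32-bit compat entry point. Converts each struct compat_kexec_segment
 * into a native struct kexec_segment in a compat_alloc_user_space()
 * buffer, then calls do_kexec_load() directly instead of bouncing
 * through the native syscall.
 */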
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	result = kexec_load_check(nr_segments, flags);
	if (result)
		return result;

	/*
	 * Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/*
	 * Because we write directly to the reserved memory region when
	 * loading crash kernels we need a mutex here to prevent multiple
	 * crash kernels from attempting to load simultaneously, and to
	 * prevent a crash kernel from loading over the top of an in-use
	 * crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, ksegments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
#endif