/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

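/*
 * The conditions above are, roughly, the situations a failing context
 * cannot recover from: an oops in interrupt context, in the idle task
 * (!p->pid), in init, or with panic_on_oops set. The oops/panic paths
 * use this predicate to decide whether to jump straight into the loaded
 * crash kernel via crash_kexec() instead of attempting normal recovery.
 */
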
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result)
		goto out;

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks, ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things could happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

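/*
 * For orientation, a sketch of the user-space view: the new kernel is
 * described as an array of struct kexec_segment entries, for example
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,	(user pointer to the data)
 *		.bufsz = image_len,
 *		.mem   = 0x100000,	(page-aligned physical destination)
 *		.memsz = round_up(image_len, PAGE_SIZE),
 *	};
 *
 * image_buf and image_len are hypothetical names; do_kimage_alloc()
 * copies such an array in and enforces the alignment, overlap and
 * bufsz <= memsz rules checked above.
 */
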
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of RAM. We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add
	 * it to the vector of segments so that its pages will also
	 * be counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}

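/*
 * Note that on success image->control_page is advanced past the hole
 * just handed out, so repeated calls carve consecutive, non-overlapping
 * holes out of the reserved region. Each allocation covers
 * (1 << order) pages and is aligned to its own size by the mask
 * arithmetic above.
 */
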
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

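/*
 * The entries written above form the kexec page list: each entry is a
 * physical address with its low bits tagged IND_DESTINATION, IND_SOURCE,
 * IND_INDIRECTION or IND_DONE. When the current indirection page is one
 * slot from full, kimage_add_entry() chains a fresh page in with an
 * IND_INDIRECTION entry, keeping the last slot free for the link or the
 * terminating IND_DONE.
 */
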
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

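/*
 * A walk over a finished image therefore looks like:
 *
 *	[ dest | IND_DESTINATION ]
 *	[ src  | IND_SOURCE ]      (implicitly dest, dest+PAGE_SIZE, ...)
 *	...
 *	[ page | IND_INDIRECTION ] ---> continues on the next entry page
 *	...
 *	[ IND_DONE ]
 *
 * for_each_kimage_entry() follows IND_INDIRECTION links transparently
 * and stops at IND_DONE.
 */
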
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it.
			 */
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

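/*
 * Because every page above is cleared before the copy_from_user(), any
 * tail where memsz exceeds bufsz ends up zero-filled. That is how
 * BSS-like trailing regions of a segment are produced without shipping
 * zeros from user space.
 */
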
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock;

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
			       struct kexec_segment __user *segments,
			       unsigned long flags)
{
	struct kimage **dest_image, *image;
	int locked;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	locked = xchg(&kexec_lock, 1);
	if (locked)
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
	}
	/* Install the new kernel, and Uninstall the old */
	image = xchg(dest_image, image);

out:
	locked = xchg(&kexec_lock, 0); /* Release the mutex */
	BUG_ON(!locked);
	kimage_free(image);

	return result;
}

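/*
 * Illustrative invocation from user space (hypothetical values; there
 * is no C library wrapper for this syscall, so kexec-tools issues it
 * via syscall(2)):
 *
 *	syscall(__NR_kexec_load, entry, nr_segments, segments,
 *		KEXEC_ARCH_DEFAULT | KEXEC_ON_CRASH);
 *
 * KEXEC_ON_CRASH selects the crash-kernel slot preloaded into
 * crashk_res; without it the image lands in the normal kexec_image
 * slot.
 */
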
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				      unsigned long nr_segments,
				      struct compat_kexec_segment __user *segments,
				      unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	int locked;

	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	locked = xchg(&kexec_lock, 1);
	if (!locked) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;
			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		locked = xchg(&kexec_lock, 0);
		BUG_ON(!locked);
	}
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

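/*
 * The buffer produced by append_elf_note()/final_note() is a standard
 * ELF note sequence: a header (n_namesz, n_descsz, n_type) followed by
 * the name and the descriptor, each padded to a 4-byte boundary, which
 * is what the (len + 3)/4 arithmetic on a u32 cursor implements. The
 * all-zero header written by final_note() terminates the sequence.
 */
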
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk(KERN_ERR "Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)

/*
 * Parsing of the "crashkernel" command line option.
 *
 * This code is intended to be called from architecture specific code.
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
					   "value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
					   "after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

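/*
 * Example of the extended syntax parsed above:
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM falls in [512M, 2G), 128M when it is 2G
 * or more, and places the reservation at physical offset 16M. An
 * omitted range end means "up to ULLONG_MAX".
 */
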
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}

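/*
 * Example of the classic syntax: "crashkernel=64M@16M" reserves 64M at
 * physical offset 16M; without "@offset", *crash_base is left at 0 for
 * the architecture code to interpret.
 */
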
/*
 * This function is the entry point for command line parsing and should
 * be called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
						crash_base);
}


void crash_save_vmcoreinfo(void)
{
	u32 *buf;

	if (!vmcoreinfo_size)
		return;

	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

	buf = (u32 *)vmcoreinfo_note;

	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);

	final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	/* vsnprintf() returns the would-be length, which can exceed the
	 * buffer; clamp to what was actually written.
	 */
	if (r >= (int)sizeof(buf))
		r = sizeof(buf) - 1;

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

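/*
 * The VMCOREINFO_* macros used below (defined in <linux/kexec.h>) are
 * wrappers around vmcoreinfo_append_str() that emit "KEY=value" style
 * records, e.g. SYMBOL(name)=<hex address> or OFFSET(struct.member)=<n>,
 * which dump-analysis tools such as makedumpfile read back out of the
 * vmcoreinfo note.
 */
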
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
	VMCOREINFO_SYMBOL(swapper_pg_dir);
	VMCOREINFO_SYMBOL(_stext);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}

module_init(crash_save_vmcoreinfo_init)

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (xchg(&kexec_lock, 1))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

	if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
		local_irq_disable();
		save_processor_state();
#endif
	} else {
		blocking_notifier_call_chain(&reboot_notifier_list,
					     SYS_RESTART, NULL);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		sysdev_shutdown();
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

	if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
		restore_processor_state();
		local_irq_enable();
#endif
	}

 Unlock:
	xchg(&kexec_lock, 0);

	return error;
}