/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages (see the sketch
 * below). As this data structure is not used in the context of the
 * current OS, it must be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
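
/*
 * Illustrative sketch (not part of the code below): each entry in that
 * descriptor list is a physical address tagged in its low bits with one
 * of the IND_* flags from <linux/kexec.h>, roughly:
 *
 *	entry = dest_page_addr | IND_DESTINATION;   set the copy destination
 *	entry = next_page_addr | IND_INDIRECTION;   list continues at this page
 *	entry = src_page_addr  | IND_SOURCE;        copy this page next
 *	entry = IND_DONE;                           end of the list
 *
 * kimage_add_entry() below builds exactly such a list one entry at a
 * time, chaining in a fresh indirection page whenever the current one
 * fills up.
 */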

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things could happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static void kimage_free_page_list(struct list_head *list);

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		pr_err("Could not allocate swap buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kimage_free_page_list(&image->control_pages);
	kfree(image);
out:
	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of RAM. We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out_free;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kfree(image);
out:
	return result;
}

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
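
/*
 * Worked example with made-up numbers: for order 0 (size 4 KiB), a
 * reservation of [16M, 80M] and segments already loaded at [16M, 20M)
 * and [20M, 24M), the loop above starts the hole at 16M, advances it
 * past each overlapping segment in turn, and settles on the first
 * aligned page at 24M, which overlaps nothing.
 */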

struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down. Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
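
/*
 * For illustration only (not part of this file): from user space the
 * syscall below is typically reached via syscall(2), with made-up
 * values along these lines:
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,                  image bytes in user memory
 *		.bufsz = image_len,
 *		.mem   = (void *)0x100000,           page-aligned destination
 *		.memsz = (image_len + 4095) & ~4095,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * In practice the kexec-tools utilities construct these segments.
 */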
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient. But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;
	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
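
/*
 * The resulting layout, as a sketch: three 4-byte header words followed
 * by the name and the descriptor, each padded up to a 4-byte boundary
 * (the "(... + 3)/4" arithmetic above):
 *
 *	| n_namesz | n_descsz | n_type | name, padded | desc, padded |
 *
 * final_note() below terminates the buffer with an all-zero header.
 */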

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away. ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
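/*
 * Example (illustrative values):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, 128M
 * when it has 2G or more, and places the reservation at offset 16M.
 */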
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
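/*
 * Example (illustrative values): crashkernel=128M@16M reserves 128M
 * starting at physical address 16M.
 */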
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};

/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
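/*
 * Example (illustrative values): crashkernel=256M,high requests 256M
 * that may be placed above 4G; it is typically paired with a small
 * crashkernel=...,low reservation for DMA-limited devices.
 */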
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}

static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char *first_colon, *first_space;
	char *ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
						crash_base, suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}

static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
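
/*
 * The vmcoreinfo_data buffer built this way is a sequence of
 * newline-terminated "KEY=value" strings; the VMCOREINFO_* macros used
 * in crash_save_vmcoreinfo_init() below emit entries such as (values
 * made up):
 *
 *	OSRELEASE=3.16.0
 *	PAGESIZE=4096
 *	SYMBOL(init_uts_ns)=ffffffff81a8f2a0
 *	OFFSET(page.flags)=0
 *
 * Dump tools such as makedumpfile parse these to interpret the memory
 * image.
 */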

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now. Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}