// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the make_task_dead() path, each of
	 * which corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed.  If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

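/*
 * Check whether a physical address range overlaps any of the image's
 * segment destinations.  Returns 1 if [start, end) intersects a
 * destination range, 0 otherwise.  Used below to make sure control
 * pages are never handed out from memory the new kernel will be
 * copied into.
 */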
int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

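/*
 * Allocate (1 << order) contiguous pages for kexec's own use.  The
 * order is stashed in page_private() so kimage_free_pages() can free
 * the whole block later, and the pages are marked reserved while kexec
 * owns them.  Zeroing is done by hand after
 * arch_kexec_post_alloc_pages() so it happens after any arch fixups
 * (e.g. clearing memory encryption on the pages).
 */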
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

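/*
 * Free a block previously handed out by kimage_alloc_pages().  The
 * allocation order is recovered from page_private(), and the arch hook
 * runs first so the pages are restored to their normal state before
 * they return to the page allocator.
 */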
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

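/* Free every page on @list, dropping each from the list first. */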
void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  As we have arch_kexec_protect_crashkres()
	 * after the kexec syscall, we naturally protect it from write
	 * (even read) access under the kernel direct mapping.  But on
	 * the other hand, we still need to operate on it when a crash
	 * happens, to generate the vmcoreinfo note; hence we rely on
	 * vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

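/*
 * Append one entry to the image's flattened entry list.  The list lives
 * in a chain of indirection pages: when the current page fills up, a
 * fresh page is allocated, linked in with IND_INDIRECTION, and writing
 * continues there.  The last slot of each page is reserved for that
 * link, and the running end of the list is kept zero-terminated.
 */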
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

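/*
 * Close out the entry list with an IND_DONE marker so the relocation
 * stub knows where to stop.
 */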
void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

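/*
 * Walk every entry in the image's list, following IND_INDIRECTION
 * links into the next indirection page and stopping at IND_DONE.  Each
 * entry is a page-aligned boot-view physical address with flag bits in
 * the low bits: IND_DESTINATION starts a new destination run, and each
 * IND_SOURCE names a source page for the next destination slot.
 */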
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

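/*
 * Tear down a kimage: unmap the vmcoreinfo safe copy, free the cached
 * extra pages, every source and indirection page on the entry list,
 * the control pages, and finally the image structure itself.  Each
 * indirection page is freed one step behind the walk, since the walk
 * still needs the page it is currently iterating through.
 */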
void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might hit if an
	 * error occurred much later, after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

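/*
 * Scan the entry list for the IND_SOURCE slot whose destination is
 * @page.  Destinations are implicit: an IND_DESTINATION entry sets the
 * address, and each subsequent IND_SOURCE advances it by one page.
 * Returns a pointer to the matching entry, or NULL if the page is not
 * anyone's destination.
 */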
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

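/*
 * Copy one segment into freshly allocated source pages and record each
 * page on the entry list.  The source buffer lives in user memory for
 * kexec_load() and in kernel memory for kexec_file_load(); anything in
 * memsz beyond bufsz is left as the zeroed remainder of the page.
 */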
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

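/* Dispatch to the normal or crash variant based on the image type. */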
int kimage_load_segment(struct kimage *image,
			struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;
#ifdef CONFIG_SYSCTL
static struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (kexec_trylock()) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		kexec_unlock();
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

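/*
 * Report the size of the crash kernel reservation, or -EBUSY if the
 * kexec lock cannot be taken right now.
 */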
ssize_t crash_get_memory_size(void)
{
	ssize_t size = 0;

	if (!kexec_trylock())
		return -EBUSY;

	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);

	kexec_unlock();
	return size;
}

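/*
 * Shrink the crash kernel reservation to @new_size bytes.  Fails with
 * -ENOENT if a crash image is already loaded, and with -EINVAL on an
 * attempt to grow the region.  The freed tail is handed back to the
 * system as a new "System RAM" resource.
 */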
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	if (!kexec_trylock())
		return -EBUSY;

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	kexec_unlock();
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages.  In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs.  Here round up the size of crash_notes
	 * to the nearest power of two and pass it to __alloc_percpu as the
	 * align value.  This makes sure crash_notes is allocated inside
	 * one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break the compile if size is bigger than PAGE_SIZE, since
	 * crash_notes definitely will span 2 pages with that.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!kexec_trylock())
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case).  However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	kexec_unlock();
	return error;
}