// SPDX-License-Identifier: GPL-2.0
/*
 * misc.c
 *
 * This is a collection of several routines used to extract the kernel
 * which includes KASLR relocation, decompression, ELF parsing, and
 * relocation processing. Additionally included are the screen and serial
 * output functions and related debugging support functions.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
14 | ||
8fee13a4 | 15 | #include "misc.h" |
dc425a6e | 16 | #include "error.h" |
820e8fec | 17 | #include "../string.h" |
67b66625 | 18 | #include "../voffset.h" |
8c5477e8 | 19 | #include <asm/bootparam_utils.h> |
968de4f0 | 20 | |
968de4f0 | 21 | /* |
4252db10 BH |
22 | * WARNING!! |
23 | * This code is compiled with -fPIC and it is relocated dynamically at | |
24 | * run time, but no relocation processing is performed. This means that | |
25 | * it is not safe to place pointers in static structures. | |
968de4f0 | 26 | */ |
1da177e4 | 27 | |
1f208de3 | 28 | /* Macros used by the included decompressor code below. */ |
1180e01d | 29 | #define STATIC static |
33f98a97 KC |
30 | /* Define an externally visible malloc()/free(). */ |
31 | #define MALLOC_VISIBLE | |
32 | #include <linux/decompress/mm.h> | |
1da177e4 | 33 | |
04999550 | 34 | /* |
394b19d6 AS |
35 | * Provide definitions of memzero and memmove as some of the decompressors will |
36 | * try to define their own functions if these are not defined as macros. | |
04999550 | 37 | */ |
1180e01d | 38 | #define memzero(s, n) memset((s), 0, (n)) |
938a000e | 39 | #ifndef memmove |
81b785f3 | 40 | #define memmove memmove |
1f208de3 | 41 | /* Functions used by the included decompressor code below. */ |
81b785f3 | 42 | void *memmove(void *dest, const void *src, size_t n); |
938a000e | 43 | #endif |
fd77c7ca | 44 | |
1da177e4 LT |
45 | /* |
46 | * This is set up by the setup-routine at boot-time | |
47 | */ | |
d55d5bc5 | 48 | struct boot_params *boot_params_ptr; |
1da177e4 | 49 | |
eb4ea1ae KS |
50 | struct port_io_ops pio_ops; |
51 | ||
82fa9637 KC |
52 | memptr free_mem_ptr; |
53 | memptr free_mem_end_ptr; | |
ac456ca0 | 54 | int spurious_nmi_count; |
1da177e4 | 55 | |
03056c88 | 56 | static char *vidmem; |
1da177e4 | 57 | static int vidport; |
6044d159 MR |
58 | |
59 | /* These might be accessed before .bss is cleared, so use .data instead. */ | |
60 | static int lines __section(".data"); | |
61 | static int cols __section(".data"); | |
1da177e4 | 62 | |
ae03c499 AK |
63 | #ifdef CONFIG_KERNEL_GZIP |
64 | #include "../../../../lib/decompress_inflate.c" | |
65 | #endif | |
66 | ||
67 | #ifdef CONFIG_KERNEL_BZIP2 | |
68 | #include "../../../../lib/decompress_bunzip2.c" | |
69 | #endif | |
70 | ||
71 | #ifdef CONFIG_KERNEL_LZMA | |
72 | #include "../../../../lib/decompress_unlzma.c" | |
73 | #endif | |
1da177e4 | 74 | |
30314804 LC |
75 | #ifdef CONFIG_KERNEL_XZ |
76 | #include "../../../../lib/decompress_unxz.c" | |
77 | #endif | |
78 | ||
13510997 AT |
79 | #ifdef CONFIG_KERNEL_LZO |
80 | #include "../../../../lib/decompress_unlzo.c" | |
81 | #endif | |
82 | ||
f9b493ac KL |
83 | #ifdef CONFIG_KERNEL_LZ4 |
84 | #include "../../../../lib/decompress_unlz4.c" | |
85 | #endif | |
fb46d057 NT |
86 | |
87 | #ifdef CONFIG_KERNEL_ZSTD | |
88 | #include "../../../../lib/decompress_unzstd.c" | |
89 | #endif | |
4252db10 BH |
90 | /* |
91 | * NOTE: When adding a new decompressor, please update the analysis in | |
92 | * ../header.S. | |
93 | */ | |
f9b493ac | 94 | |
1da177e4 LT |
95 | static void scroll(void) |
96 | { | |
97 | int i; | |
98 | ||
81b785f3 | 99 | memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2); |
fd77c7ca | 100 | for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2) |
1da177e4 LT |
101 | vidmem[i] = ' '; |
102 | } | |
103 | ||
8fee13a4 YL |
104 | #define XMTRDY 0x20 |
105 | ||
106 | #define TXR 0 /* Transmit register (WRITE) */ | |
107 | #define LSR 5 /* Line Status */ | |
108 | static void serial_putchar(int ch) | |
109 | { | |
110 | unsigned timeout = 0xffff; | |
111 | ||
112 | while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout) | |
113 | cpu_relax(); | |
114 | ||
115 | outb(ch, early_serial_base + TXR); | |
116 | } | |
117 | ||
7aac3015 | 118 | void __putstr(const char *s) |
1da177e4 | 119 | { |
fd77c7ca | 120 | int x, y, pos; |
1da177e4 LT |
121 | char c; |
122 | ||
8fee13a4 YL |
123 | if (early_serial_base) { |
124 | const char *str = s; | |
125 | while (*str) { | |
126 | if (*str == '\n') | |
127 | serial_putchar('\r'); | |
128 | serial_putchar(*str++); | |
129 | } | |
130 | } | |
6bcb13b3 | 131 | |
fb1cc2f9 | 132 | if (lines == 0 || cols == 0) |
a24e7851 RR |
133 | return; |
134 | ||
d55d5bc5 AB |
135 | x = boot_params_ptr->screen_info.orig_x; |
136 | y = boot_params_ptr->screen_info.orig_y; | |
1da177e4 | 137 | |
fd77c7ca PC |
138 | while ((c = *s++) != '\0') { |
139 | if (c == '\n') { | |
1da177e4 | 140 | x = 0; |
fd77c7ca | 141 | if (++y >= lines) { |
1da177e4 LT |
142 | scroll(); |
143 | y--; | |
144 | } | |
145 | } else { | |
020878ac | 146 | vidmem[(x + cols * y) * 2] = c; |
fd77c7ca | 147 | if (++x >= cols) { |
1da177e4 | 148 | x = 0; |
fd77c7ca | 149 | if (++y >= lines) { |
1da177e4 LT |
150 | scroll(); |
151 | y--; | |
152 | } | |
153 | } | |
154 | } | |
155 | } | |
156 | ||
d55d5bc5 AB |
157 | boot_params_ptr->screen_info.orig_x = x; |
158 | boot_params_ptr->screen_info.orig_y = y; | |
1da177e4 LT |
159 | |
160 | pos = (x + cols * y) * 2; /* Update cursor position */ | |
b02aae9c RH |
161 | outb(14, vidport); |
162 | outb(0xff & (pos >> 9), vidport+1); | |
163 | outb(15, vidport); | |
164 | outb(0xff & (pos >> 1), vidport+1); | |
1da177e4 LT |
165 | } |
166 | ||
9ba8ec8e PA |
167 | static noinline void __putnum(unsigned long value, unsigned int base, |
168 | int mindig) | |
79063a7c | 169 | { |
9ba8ec8e PA |
170 | char buf[8*sizeof(value)+1]; |
171 | char *p; | |
79063a7c | 172 | |
9ba8ec8e PA |
173 | p = buf + sizeof(buf); |
174 | *--p = '\0'; | |
79063a7c | 175 | |
9ba8ec8e PA |
176 | while (mindig-- > 0 || value) { |
177 | unsigned char digit = value % base; | |
178 | digit += (digit >= 10) ? ('a'-10) : '0'; | |
179 | *--p = digit; | |
79063a7c | 180 | |
9ba8ec8e | 181 | value /= base; |
79063a7c | 182 | } |
9ba8ec8e PA |
183 | |
184 | __putstr(p); | |
185 | } | |
186 | ||
187 | void __puthex(unsigned long value) | |
188 | { | |
189 | __putnum(value, 16, sizeof(value)*2); | |
190 | } | |
191 | ||
192 | void __putdec(unsigned long value) | |
193 | { | |
194 | __putnum(value, 10, 1); | |
79063a7c KC |
195 | } |
196 | ||
a554e740 | 197 | #ifdef CONFIG_X86_NEED_RELOCS |
8391c73c BH |
198 | static void handle_relocations(void *output, unsigned long output_len, |
199 | unsigned long virt_addr) | |
a0215061 KC |
200 | { |
201 | int *reloc; | |
202 | unsigned long delta, map, ptr; | |
203 | unsigned long min_addr = (unsigned long)output; | |
4abf061b | 204 | unsigned long max_addr = min_addr + (VO___bss_start - VO__text); |
a0215061 KC |
205 | |
206 | /* | |
207 | * Calculate the delta between where vmlinux was linked to load | |
208 | * and where it was actually loaded. | |
209 | */ | |
210 | delta = min_addr - LOAD_PHYSICAL_ADDR; | |
a0215061 KC |
211 | |
212 | /* | |
213 | * The kernel contains a table of relocation addresses. Those | |
214 | * addresses have the final load address of the kernel in virtual | |
215 | * memory. We are currently working in the self map. So we need to | |
216 | * create an adjustment for kernel memory addresses to the self map. | |
217 | * This will involve subtracting out the base address of the kernel. | |
218 | */ | |
219 | map = delta - __START_KERNEL_map; | |
220 | ||
8391c73c BH |
221 | /* |
222 | * 32-bit always performs relocations. 64-bit relocations are only | |
223 | * needed if KASLR has chosen a different starting address offset | |
224 | * from __START_KERNEL_map. | |
225 | */ | |
226 | if (IS_ENABLED(CONFIG_X86_64)) | |
227 | delta = virt_addr - LOAD_PHYSICAL_ADDR; | |
228 | ||
229 | if (!delta) { | |
230 | debug_putstr("No relocation needed... "); | |
231 | return; | |
232 | } | |
233 | debug_putstr("Performing relocations... "); | |
234 | ||
a0215061 KC |
235 | /* |
236 | * Process relocations: 32 bit relocations first then 64 bit after. | |
a8327be7 | 237 | * Two sets of binary relocations are added to the end of the kernel |
a0215061 KC |
238 | * before compression. Each relocation table entry is the kernel |
239 | * address of the location which needs to be updated stored as a | |
240 | * 32-bit value which is sign extended to 64 bits. | |
241 | * | |
242 | * Format is: | |
243 | * | |
244 | * kernel bits... | |
245 | * 0 - zero terminator for 64 bit relocations | |
246 | * 64 bit relocation repeated | |
247 | * 0 - zero terminator for 32 bit relocations | |
248 | * 32 bit relocation repeated | |
249 | * | |
250 | * So we work backwards from the end of the decompressed image. | |
251 | */ | |
252 | for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) { | |
6f9af75f | 253 | long extended = *reloc; |
a0215061 KC |
254 | extended += map; |
255 | ||
256 | ptr = (unsigned long)extended; | |
257 | if (ptr < min_addr || ptr > max_addr) | |
258 | error("32-bit relocation outside of kernel!\n"); | |
259 | ||
260 | *(uint32_t *)ptr += delta; | |
261 | } | |
262 | #ifdef CONFIG_X86_64 | |
263 | for (reloc--; *reloc; reloc--) { | |
264 | long extended = *reloc; | |
265 | extended += map; | |
266 | ||
267 | ptr = (unsigned long)extended; | |
268 | if (ptr < min_addr || ptr > max_addr) | |
269 | error("64-bit relocation outside of kernel!\n"); | |
270 | ||
271 | *(uint64_t *)ptr += delta; | |
272 | } | |
273 | #endif | |
274 | } | |
275 | #else | |
8391c73c BH |
276 | static inline void handle_relocations(void *output, unsigned long output_len, |
277 | unsigned long virt_addr) | |
a0215061 KC |
278 | { } |
279 | #endif | |
280 | ||
7734a0f3 | 281 | static size_t parse_elf(void *output) |
099e1377 IC |
282 | { |
283 | #ifdef CONFIG_X86_64 | |
284 | Elf64_Ehdr ehdr; | |
285 | Elf64_Phdr *phdrs, *phdr; | |
286 | #else | |
287 | Elf32_Ehdr ehdr; | |
288 | Elf32_Phdr *phdrs, *phdr; | |
289 | #endif | |
290 | void *dest; | |
291 | int i; | |
292 | ||
293 | memcpy(&ehdr, output, sizeof(ehdr)); | |
fd77c7ca | 294 | if (ehdr.e_ident[EI_MAG0] != ELFMAG0 || |
099e1377 IC |
295 | ehdr.e_ident[EI_MAG1] != ELFMAG1 || |
296 | ehdr.e_ident[EI_MAG2] != ELFMAG2 || | |
7734a0f3 | 297 | ehdr.e_ident[EI_MAG3] != ELFMAG3) |
099e1377 | 298 | error("Kernel is not a valid ELF file"); |
099e1377 | 299 | |
e605a425 | 300 | debug_putstr("Parsing ELF... "); |
099e1377 IC |
301 | |
302 | phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); | |
303 | if (!phdrs) | |
304 | error("Failed to allocate space for phdrs"); | |
305 | ||
306 | memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); | |
307 | ||
fd77c7ca | 308 | for (i = 0; i < ehdr.e_phnum; i++) { |
099e1377 IC |
309 | phdr = &phdrs[i]; |
310 | ||
311 | switch (phdr->p_type) { | |
312 | case PT_LOAD: | |
c55b8550 L |
313 | #ifdef CONFIG_X86_64 |
314 | if ((phdr->p_align % 0x200000) != 0) | |
315 | error("Alignment of LOAD segment isn't multiple of 2MB"); | |
316 | #endif | |
099e1377 IC |
317 | #ifdef CONFIG_RELOCATABLE |
318 | dest = output; | |
319 | dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); | |
320 | #else | |
fd77c7ca | 321 | dest = (void *)(phdr->p_paddr); |
099e1377 | 322 | #endif |
81b785f3 | 323 | memmove(dest, output + phdr->p_offset, phdr->p_filesz); |
099e1377 IC |
324 | break; |
325 | default: /* Ignore other PT_* */ break; | |
326 | } | |
327 | } | |
5067cf53 JJ |
328 | |
329 | free(phdrs); | |
7734a0f3 AL |
330 | |
331 | return ehdr.e_entry - LOAD_PHYSICAL_ADDR; | |
099e1377 IC |
332 | } |
333 | ||
9c554610 | 334 | const unsigned long kernel_text_size = VO___start_rodata - VO__text; |
83381519 AB |
335 | const unsigned long kernel_total_size = VO__end - VO__text; |
336 | ||
24388292 AB |
337 | static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4); |
338 | ||
339 | extern unsigned char input_data[]; | |
340 | extern unsigned int input_len, output_len; | |
341 | ||
83381519 AB |
342 | unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr, |
343 | void (*error)(char *x)) | |
344 | { | |
345 | unsigned long entry; | |
346 | ||
347 | if (!free_mem_ptr) { | |
348 | free_mem_ptr = (unsigned long)boot_heap; | |
349 | free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap); | |
350 | } | |
351 | ||
352 | if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len, | |
353 | NULL, error) < 0) | |
354 | return ULONG_MAX; | |
355 | ||
356 | entry = parse_elf(outbuf); | |
357 | handle_relocations(outbuf, output_len, virt_addr); | |
358 | ||
359 | return entry; | |
360 | } | |
361 | ||
cd0d9d92 AB |
362 | /* |
363 | * Set the memory encryption xloadflag based on the mem_encrypt= command line | |
364 | * parameter, if provided. | |
365 | */ | |
366 | static void parse_mem_encrypt(struct setup_header *hdr) | |
367 | { | |
368 | int on = cmdline_find_option_bool("mem_encrypt=on"); | |
369 | int off = cmdline_find_option_bool("mem_encrypt=off"); | |
370 | ||
371 | if (on > off) | |
372 | hdr->xloadflags |= XLF_MEM_ENCRYPTION; | |
373 | } | |
374 | ||
f30470c1 AK |
375 | static void early_sev_detect(void) |
376 | { | |
377 | /* | |
378 | * Accessing video memory causes guest termination because | |
379 | * the boot stage2 #VC handler of SEV-ES/SNP guests does not | |
380 | * support MMIO handling and kexec -c adds screen_info to the | |
381 | * boot parameters passed to the kexec kernel, which causes | |
382 | * console output to be dumped to both video and serial. | |
383 | */ | |
384 | if (sev_status & MSR_AMD64_SEV_ES_ENABLED) | |
385 | lines = cols = 0; | |
386 | } | |
387 | ||
974f221c YL |
388 | /* |
389 | * The compressed kernel image (ZO), has been moved so that its position | |
390 | * is against the end of the buffer used to hold the uncompressed kernel | |
391 | * image (VO) and the execution environment (.bss, .brk), which makes sure | |
392 | * there is room to do the in-place decompression. (See header.S for the | |
393 | * calculations.) | |
394 | * | |
395 | * |-----compressed kernel image------| | |
396 | * V V | |
397 | * 0 extract_offset +INIT_SIZE | |
398 | * |-----------|---------------|-------------------------|--------| | |
399 | * | | | | | |
400 | * VO__text startup_32 of ZO VO__end ZO__end | |
401 | * ^ ^ | |
402 | * |-------uncompressed kernel image---------| | |
403 | * | |
404 | */ | |
24388292 | 405 | asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output) |
1da177e4 | 406 | { |
8eabf42a | 407 | unsigned long virt_addr = LOAD_PHYSICAL_ADDR; |
24388292 | 408 | memptr heap = (memptr)boot_heap; |
1869dbe8 | 409 | unsigned long needed_size; |
7734a0f3 | 410 | size_t entry_offset; |
f285f4a2 | 411 | |
6655e0aa | 412 | /* Retain x86 boot parameters pointer passed from startup_32/64. */ |
d55d5bc5 | 413 | boot_params_ptr = rmode; |
1da177e4 | 414 | |
6655e0aa | 415 | /* Clear flags intended for solely in-kernel use. */ |
d55d5bc5 | 416 | boot_params_ptr->hdr.loadflags &= ~KASLR_FLAG; |
78cac48c | 417 | |
cd0d9d92 AB |
418 | parse_mem_encrypt(&boot_params_ptr->hdr); |
419 | ||
d55d5bc5 | 420 | sanitize_boot_params(boot_params_ptr); |
5dcd14ec | 421 | |
d55d5bc5 | 422 | if (boot_params_ptr->screen_info.orig_video_mode == 7) { |
1da177e4 LT |
423 | vidmem = (char *) 0xb0000; |
424 | vidport = 0x3b4; | |
425 | } else { | |
426 | vidmem = (char *) 0xb8000; | |
427 | vidport = 0x3d4; | |
428 | } | |
429 | ||
d55d5bc5 AB |
430 | lines = boot_params_ptr->screen_info.orig_video_lines; |
431 | cols = boot_params_ptr->screen_info.orig_video_cols; | |
1da177e4 | 432 | |
eb4ea1ae KS |
433 | init_default_io_ops(); |
434 | ||
4b05f815 KS |
435 | /* |
436 | * Detect TDX guest environment. | |
437 | * | |
438 | * It has to be done before console_init() in order to use | |
439 | * paravirtualized port I/O operations if needed. | |
440 | */ | |
441 | early_tdx_detect(); | |
442 | ||
f30470c1 AK |
443 | early_sev_detect(); |
444 | ||
8fee13a4 | 445 | console_init(); |
5b51ae96 BP |
446 | |
447 | /* | |
448 | * Save RSDP address for later use. Have this after console_init() | |
449 | * so that early debugging output from the RSDP parsing code can be | |
450 | * collected. | |
451 | */ | |
d55d5bc5 | 452 | boot_params_ptr->acpi_rsdp_addr = get_rsdp_addr(); |
5b51ae96 | 453 | |
c0402881 | 454 | debug_putstr("early console in extract_kernel\n"); |
8fee13a4 | 455 | |
4c83d653 | 456 | free_mem_ptr = heap; /* Heap */ |
7c539764 | 457 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
968de4f0 | 458 | |
1869dbe8 SW |
459 | /* |
460 | * The memory hole needed for the kernel is the larger of either | |
461 | * the entire decompressed kernel plus relocation table, or the | |
462 | * entire decompressed kernel plus .bss and .brk sections. | |
463 | * | |
464 | * On X86_64, the memory is mapped with PMD pages. Round the | |
465 | * size up so that the full extent of PMD pages mapped is | |
466 | * included in the check against the valid memory table | |
467 | * entries. This ensures the full mapped area is usable RAM | |
468 | * and doesn't include any reserved areas. | |
469 | */ | |
24388292 | 470 | needed_size = max_t(unsigned long, output_len, kernel_total_size); |
1869dbe8 SW |
471 | #ifdef CONFIG_X86_64 |
472 | needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN); | |
473 | #endif | |
474 | ||
79063a7c KC |
475 | /* Report initial kernel position details. */ |
476 | debug_putaddr(input_data); | |
477 | debug_putaddr(input_len); | |
478 | debug_putaddr(output); | |
479 | debug_putaddr(output_len); | |
4d2d5424 | 480 | debug_putaddr(kernel_total_size); |
1869dbe8 | 481 | debug_putaddr(needed_size); |
79063a7c | 482 | |
3548e131 KS |
483 | #ifdef CONFIG_X86_64 |
484 | /* Report address of 32-bit trampoline */ | |
485 | debug_putaddr(trampoline_32bit); | |
486 | #endif | |
487 | ||
8391c73c BH |
488 | choose_random_location((unsigned long)input_data, input_len, |
489 | (unsigned long *)&output, | |
1869dbe8 | 490 | needed_size, |
8391c73c | 491 | &virt_addr); |
8ab3820f KC |
492 | |
493 | /* Validate memory location choices. */ | |
7ed42a28 | 494 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) |
8391c73c BH |
495 | error("Destination physical address inappropriately aligned"); |
496 | if (virt_addr & (MIN_KERNEL_ALIGN - 1)) | |
497 | error("Destination virtual address inappropriately aligned"); | |
778cb929 | 498 | #ifdef CONFIG_X86_64 |
7ed42a28 | 499 | if (heap > 0x3fffffffffffUL) |
778cb929 | 500 | error("Destination address too large"); |
24388292 | 501 | if (virt_addr + needed_size > KERNEL_IMAGE_SIZE) |
b892cb87 | 502 | error("Destination virtual address is beyond the kernel mapping area"); |
778cb929 | 503 | #else |
147dd561 | 504 | if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) |
968de4f0 | 505 | error("Destination address too large"); |
7ed42a28 | 506 | #endif |
968de4f0 | 507 | #ifndef CONFIG_RELOCATABLE |
8eabf42a | 508 | if (virt_addr != LOAD_PHYSICAL_ADDR) |
8391c73c | 509 | error("Destination virtual address changed when not relocatable"); |
968de4f0 | 510 | #endif |
1da177e4 | 511 | |
e605a425 | 512 | debug_putstr("\nDecompressing Linux... "); |
3fd1239a KS |
513 | |
514 | if (init_unaccepted_memory()) { | |
515 | debug_putstr("Accepting memory... "); | |
5adfeaec | 516 | accept_memory(__pa(output), needed_size); |
3fd1239a KS |
517 | } |
518 | ||
83381519 | 519 | entry_offset = decompress_kernel(output, virt_addr, error); |
7734a0f3 AL |
520 | |
521 | debug_putstr("done.\nBooting the kernel (entry_offset: 0x"); | |
522 | debug_puthex(entry_offset); | |
523 | debug_putstr(").\n"); | |
597cfe48 | 524 | |
b099155e JR |
525 | /* Disable exception handling before booting the kernel */ |
526 | cleanup_exception_handling(); | |
597cfe48 | 527 | |
ac456ca0 NJ |
528 | if (spurious_nmi_count) { |
529 | error_putstr("Spurious early NMIs ignored: "); | |
530 | error_putdec(spurious_nmi_count); | |
531 | error_putstr("\n"); | |
532 | } | |
533 | ||
7734a0f3 | 534 | return output + entry_offset; |
1da177e4 | 535 | } |