/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */
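/*
 * Illustrative sketch of the rule above (not part of the real script; the
 * ".example" section and both symbols are made up):
 *
 *	fixed_phys_addr = ABSOLUTE(0x100000);	never relocated
 *	.example : { relative_sym = .; }	moves with the image
 */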
#define LOAD_OFFSET __START_KERNEL_map

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>
#include <asm/kexec.h>
#undef i386     /* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
const_current_task = current_task;
const_cpu_current_top_of_stack = cpu_current_top_of_stack;
#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, in the kernel identity mapping, the pages mapping the text
 * section and the (freed) padding pages around it get different RWX
 * permissions, so those identity mappings are broken down into smaller
 * pages. Since kernel text and kernel identity mappings are separate on
 * 64-bit, we can enable protection checks and still retain 2MB large
 * page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);
/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED					\
	. = ALIGN(PMD_SIZE);				\
	__start_bss_decrypted = .;			\
	__pi___start_bss_decrypted = .;			\
	*(.bss..decrypted);				\
	. = ALIGN(PAGE_SIZE);				\
	__start_bss_decrypted_unused = .;		\
	. = ALIGN(PMD_SIZE);				\
	__end_bss_decrypted = .;			\
	__pi___end_bss_decrypted = .;			\

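/*
 * Illustrative sketch (not part of this script): C code places objects in
 * .bss..decrypted via the __bss_decrypted attribute from
 * arch/x86/include/asm/mem_encrypt.h, e.g. (the variable name is made up):
 *
 *	static u64 shared_with_hv[64] __bss_decrypted;
 */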
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE)
#define KEXEC_RELOCATE_KERNEL				\
	. = ALIGN(0x100);				\
	__relocate_kernel_start = .;			\
	*(.text..relocate_kernel);			\
	*(.data..relocate_kernel);			\
	__relocate_kernel_end = .;

ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX_SIZE,
       "relocate_kernel code too large!")
#else
#define KEXEC_RELOCATE_KERNEL
#endif

PHDRS {
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
	note PT_NOTE FLAGS(0);          /* ___ */
}
SECTIONS
{
	. = __START_KERNEL;
#ifdef CONFIG_X86_32
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif
	/* Text and read-only data */
	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		ALIGN_ENTRY_TEXT_BEGIN
		*(.text..__x86.rethunk_untrain)
		ENTRY_TEXT
#ifdef CONFIG_MITIGATION_SRSO
		/*
		 * See the comment above srso_alias_untrain_ret()'s
		 * definition.
		 */
		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text..__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END

		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_MITIGATION_RETPOLINE
		*(.text..__x86.indirect_thunk)
		*(.text..__x86.return_thunk)
#endif
		STATIC_CALL_TEXT

		*(.gnu.warning)

	} :text = 0xcccccccc
	/* bootstrapping code */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
		HEAD_TEXT
	} :text = 0xcccccccc
	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END
	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

		/* equivalent to task_pt_regs(&init_task) */
		__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif
		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHE_HOT_DATA(L1_CACHE_BYTES)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS
		KEXEC_RELOCATE_KERNEL

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE
	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

	INIT_TEXT_SECTION(PAGE_SIZE)
	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}
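	/*
	 * Illustrative sketch (not part of this script): callers test
	 * features with
	 *
	 *	if (static_cpu_has(X86_FEATURE_XMM2))
	 *		use_sse2_path();	(made-up helper)
	 *
	 * Until alternatives have run, the check jumps to code in
	 * .altinstr_aux that does a dynamic boot_cpu_has() lookup; patching
	 * removes those references, so the section can be freed along with
	 * the rest of init memory.
	 */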
	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif
#ifdef CONFIG_MITIGATION_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
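	/*
	 * Illustrative sketch (not from this file): with retpolines built in,
	 * an indirect call is emitted as
	 *
	 *	call __x86_indirect_thunk_rax
	 *
	 * and its location is recorded here so apply_retpolines() can rewrite
	 * it to a plain "call *%rax" (plus any needed speculation controls)
	 * on CPUs that do not require the thunk.
	 */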
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}
	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}
	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif
#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif
#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif
	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	/*
	 * And here are the replacement instructions. The linker sticks
	 * them in as binary blobs. The .altinstructions section has enough
	 * data to get their address and length, to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
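	/*
	 * Illustrative sketch (not part of this script), using the
	 * alternative() macro from arch/x86/include/asm/alternative.h; the
	 * feature bit and instructions are made up:
	 *
	 *	alternative("nop", "sfence", X86_FEATURE_FANCY_FENCE);
	 *
	 * This emits "nop" in-line, stashes "sfence" in .altinstr_replacement,
	 * and records a struct alt_instr entry in .altinstructions so
	 * apply_alternatives() can patch the site when the feature is present.
	 */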
	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}
	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions.
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
	PERCPU_SECTION(L1_CACHE_BYTES)
	ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large")

	RUNTIME_CONST_VARIABLES
	RUNTIME_CONST(ptr, USER_PTR_MAX)
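	/*
	 * Illustrative sketch (not part of this script): the C side reads the
	 * patched value via runtime_const_ptr() from
	 * arch/x86/include/asm/runtime-const.h, e.g.
	 *
	 *	ok = (unsigned long)ptr <= runtime_const_ptr(USER_PTR_MAX);
	 *
	 * Each use site is collected here so the placeholder constant can be
	 * fixed up during boot once the real value is known.
	 */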
	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}
	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}
	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;
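	/*
	 * Illustrative sketch (not part of this script): the automatic
	 * reservation mentioned above boils down to something like the
	 * following in arch/x86/kernel/setup.c:
	 *
	 *	memblock_reserve(__pa_symbol(_text),
	 *			 (unsigned long)__end_of_kernel_reserve -
	 *			 (unsigned long)_text);
	 */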
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since it is located after __end_of_kernel_reserve
	 * it will be discarded and become part of the available memory. As
	 * such, it can only be used by very early boot code and must not be
	 * needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
#ifdef CONFIG_PROPELLER_CLANG
	.llvm_bb_addr_map : { *(.llvm_bb_addr_map) }
#endif

	ELF_DETAILS

	DISCARDS
	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")
	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}
/*
 * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
 * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
 * let's assert that fuller build coverage is more valuable than being able
 * to run a COMPILE_TEST kernel.
 */
#ifndef CONFIG_COMPILE_TEST
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#endif
/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

#ifdef CONFIG_X86_64
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif
#ifdef CONFIG_MITIGATION_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses:
 */
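/*
 * Worked example of the identity (A | B) - (A & B) == A ^ B: with
 * A = 0b1100 and B = 0b1010, (A | B) = 0b1110 = 14 and (A & B) = 0b1000 = 8,
 * so the difference is 6 = 0b0110, which is exactly A ^ B.
 */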
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
		"SRSO function pair won't alias");
#endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_its_thunk_rax not in second half of cacheline");
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
#endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
#endif

#endif /* CONFIG_X86_64 */
/*
 * The symbols below are referenced using relative relocations in the
 * respective ELF notes. This produces build time constants that the
 * linker will never mark as relocatable. (Using just ABSOLUTE() is not
 * sufficient for that.)
 */
#ifdef CONFIG_XEN_PV
xen_elfnote_entry_value =
	ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen);
#endif

#ifdef CONFIG_PVH
xen_elfnote_phys32_entry_value =
	ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
#endif