/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If a symbol value should change
 * when the kernel is relocated, make the symbol section-relative and put it
 * inside the section definition.
 */

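/*
 * LOAD_OFFSET is the difference between a section's virtual address and the
 * physical address it is loaded at: __PAGE_OFFSET on 32-bit, and
 * __START_KERNEL_map (the 0xffffffff80000000 kernel text mapping) on 64-bit.
 */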
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

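/* CONFIG_OUTPUT_FORMAT is "elf32-i386" on 32-bit and "elf64-x86-64" on 64-bit. */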
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

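/* jiffies is an alias for the low 32 bits of jiffies_64; x86 is little-endian. */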
jiffies = jiffies_64;

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, the kernel identity mappings will have different RWX permissions
 * for the pages mapping the text section and for the (freed) pages padding
 * it, so the identity mappings are broken down into smaller pages. On
 * 64-bit, kernel text and kernel identity mappings are separate, so we can
 * enable protection checks and still retain 2MB large page mappings for
 * kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END				\
		. = ALIGN(HPAGE_SIZE);			\
		__end_rodata_hpage_align = .;		\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED						\
	. = ALIGN(PMD_SIZE);					\
	__start_bss_decrypted = .;				\
	*(.bss..decrypted);					\
	. = ALIGN(PAGE_SIZE);					\
	__start_bss_decrypted_unused = .;			\
	. = ALIGN(PMD_SIZE);					\
	__end_bss_decrypted = .;				\

#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END				\
		. = ALIGN(PAGE_SIZE);			\
		__end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
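	/* FLAGS() takes ELF p_flags bits: 4 = read, 2 = write, 1 = execute. */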
	text PT_LOAD FLAGS(5);          /* R_E */
	data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}

SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		_stext = .;
		/* bootstrapping code */
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		SOFTIRQENTRY_TEXT
#ifdef CONFIG_RETPOLINE
		__indirect_thunk_start = .;
		*(.text.__x86.indirect_thunk)
		*(.text.__x86.return_thunk)
		__indirect_thunk_end = .;
#endif
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
#ifdef CONFIG_CPU_SRSO
		*(.text.__x86.rethunk_untrain)
#endif

		ENTRY_TEXT

#ifdef CONFIG_CPU_SRSO
		/*
		 * See the comment above srso_untrain_ret_alias()'s
		 * definition.
		 */
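		/*
		 * The OR below forces bits 2, 8, 14 and 20 of the location
		 * counter to be set, which should place the safe thunk at
		 * an address aliasing srso_untrain_ret_alias in the branch
		 * predictor; the ASSERT at the end of this file checks that
		 * the two addresses differ in exactly those bits.
		 */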
		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
		*(.text.__x86.rethunk_safe)
#endif
		ALIGN_ENTRY_TEXT_END
		*(.gnu.warning)

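		/* =0xcccc fills section padding with int3 (0xcc) so stray jumps into it trap. */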
	} :text =0xcccc

	/* End of text section, which should occupy a whole number of pages */
	_etext = .;
	. = ALIGN(PAGE_SIZE);

	X86_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X86_ALIGN_RODATA_END

	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	BUG_TABLE

	ORC_UNWIND_TABLE

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;

	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

	INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
	:init
#endif

	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}

	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif

	/*
	 * Start address and size of operations which can be patched at
	 * runtime with virtualization-friendly instructions or bare-metal
	 * native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}

#ifdef CONFIG_RETPOLINE
	/*
	 * List of instructions that call/jmp/jcc to retpoline thunks
	 * __x86_indirect_thunk_*(). These instructions can be patched along
	 * with alternatives, after which the section can be freed.
	 */
	. = ALIGN(8);
	.retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
		__retpoline_sites = .;
		*(.retpoline_sites)
		__retpoline_sites_end = .;
	}

	. = ALIGN(8);
	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
		__return_sites = .;
		*(.return_sites)
		__return_sites_end = .;
	}

	. = ALIGN(8);
	.call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
		__call_sites = .;
		*(.call_sites)
		__call_sites_end = .;
	}
#endif

#ifdef CONFIG_X86_KERNEL_IBT
	. = ALIGN(8);
	.ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
		__ibt_endbr_seal = .;
		*(.ibt_endbr_seal)
		__ibt_endbr_seal_end = .;
	}
#endif

#ifdef CONFIG_FINEIBT
	. = ALIGN(8);
	.cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
		__cfi_sites = .;
		*(.cfi_sites)
		__cfi_sites_end = .;
	}
#endif

	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data
	 * to get their address and length so the kernel can be patched
	 * safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}

	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}

	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		. = ALIGN(PAGE_SIZE);
		*(BSS_MAIN)
		BSS_DECRYPTED
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}

	/*
	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
	 * automatically reserved in setup_arch(). Anything after here must be
	 * explicitly reserved using memblock_reserve() or it will be discarded
	 * and treated as available memory.
	 */
	__end_of_kernel_reserve = .;

	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.bss..brk)		/* areas brk users have reserved */
		__brk_limit = .;
	}

	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Early scratch/workarea section: Lives outside of the kernel proper
	 * (_text - _end).
	 *
	 * Resides after _end because even though the .brk section is after
	 * __end_of_kernel_reserve, the .brk section is later reserved as a
	 * part of the kernel. Since this section is located after
	 * __end_of_kernel_reserve it will be discarded and become part of
	 * the available memory. As such, it can only be used by very early
	 * boot code and must not be needed afterwards.
	 *
	 * Currently used by SME for performing in-place encryption of the
	 * kernel during boot. Resides on a 2MB boundary to simplify the
	 * pagetable setup used for SME in-place encryption.
	 */
	. = ALIGN(HPAGE_SIZE);
	.init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
		__init_scratch_begin = .;
		*(.init.scratch)
		. = ALIGN(HPAGE_SIZE);
		__init_scratch_end = .;
	}
#endif

	STABS_DEBUG
	DWARF_DEBUG
	ELF_DETAILS

	DISCARDS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the lazy dispatch entries.
	 */
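	/*
	 * The lazy dispatch header is the three reserved GOT/PLT slots:
	 * 0x18 bytes (3 * 8) on 64-bit, 0xc bytes (3 * 4) on 32-bit.
	 */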
	.got.plt (INFO) : { *(.got.plt) }
	ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
	       SIZEOF(.got.plt) == 0x18,
#else
	       SIZEOF(.got.plt) == 0xc,
#endif
	       "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.got : {
		*(.got) *(.igot.*)
	}
	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

	.plt : {
		*(.plt) *(.plt.*) *(.iplt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

	.rel.dyn : {
		*(.rel.*) *(.rel_*)
	}
	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
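/*
 * These symbols are referenced by early boot code, before the per-cpu
 * areas are set up, when only the init copy of the per-cpu data (at
 * __per_cpu_load) exists.
 */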
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_RETHUNK
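/* Masking with 0x3f tests the low six address bits, i.e. 64-byte (cacheline) alignment. */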
. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
#endif

#ifdef CONFIG_CPU_SRSO
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR of the two
 * function addresses: OR collects the bits set in either address and AND
 * the bits set in both, so the difference is exactly the bits in which
 * the addresses differ.
 */
. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
		"SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */