Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
17ce265d SR |
2 | /* |
3 | * ld script for the x86 kernel | |
4 | * | |
5 | * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> | |
6 | * | |
91fd7fe8 IM |
7 | * Modernisation, unification and other changes and fixes: |
8 | * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org> | |
17ce265d SR |
9 | * |
10 | * | |
11 | * Don't define absolute symbols until and unless you know that symbol | |
12 | * value should remain constant even if kernel image is relocated | |
13 | * at run time. Absolute symbols are not relocated. If symbol value should | |
14 | * change if kernel is relocated, make the symbol section relative and | |
15 | * put it inside the section definition. | |
16 | */ | |
17 | ||
17ce265d | 18 | #define LOAD_OFFSET __START_KERNEL_map |
17ce265d | 19 | |
84d5f77f | 20 | #define RUNTIME_DISCARD_EXIT |
441110a5 | 21 | #define EMITS_PT_NOTE |
f0d7ee17 | 22 | #define RO_EXCEPTION_TABLE_ALIGN 16 |
441110a5 | 23 | |
17ce265d SR |
24 | #include <asm-generic/vmlinux.lds.h> |
25 | #include <asm/asm-offsets.h> | |
26 | #include <asm/thread_info.h> | |
27 | #include <asm/page_types.h> | |
ee9f8fce | 28 | #include <asm/orc_lookup.h> |
17ce265d SR |
29 | #include <asm/cache.h> |
30 | #include <asm/boot.h> | |
cb33ff9e | 31 | #include <asm/kexec.h> |
17ce265d SR |
32 | |
33 | #undef i386 /* in case the preprocessor is a 32bit one */ | |
34 | ||
e6d7bc0b | 35 | OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) |
17ce265d SR |
36 | |
37 | #ifdef CONFIG_X86_32 | |
38 | OUTPUT_ARCH(i386) | |
39 | ENTRY(phys_startup_32) | |
17ce265d SR |
40 | #else |
41 | OUTPUT_ARCH(i386:x86-64) | |
42 | ENTRY(phys_startup_64) | |
17ce265d SR |
43 | #endif |
44 | ||
d8ad6d39 | 45 | jiffies = jiffies_64; |
a1e4cc01 | 46 | const_current_task = current_task; |
385f72c8 | 47 | const_cpu_current_top_of_stack = cpu_current_top_of_stack; |
d8ad6d39 | 48 | |
9ccaf77c | 49 | #if defined(CONFIG_X86_64) |
d6cc1c3a | 50 | /* |
9ccaf77c KC |
51 | * On 64-bit, align RODATA to 2MB so we retain large page mappings for |
52 | * boundaries spanning kernel text, rodata and data sections. | |
d6cc1c3a SS |
53 | * |
54 | * However, kernel identity mappings will have different RWX permissions | |
55 | * to the pages mapping to text and to the pages padding (which are freed) the | |
56 | * text section. Hence kernel identity mappings will be broken to smaller | |
57 | * pages. For 64-bit, kernel text and kernel identity mappings are different, | |
9ccaf77c KC |
58 | * so we can enable protection checks as well as retain 2MB large page |
59 | * mappings for kernel text. | |
d6cc1c3a | 60 | */ |
39d668e0 | 61 | #define X86_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); |
74e08179 | 62 | |
39d668e0 | 63 | #define X86_ALIGN_RODATA_END \ |
74e08179 | 64 | . = ALIGN(HPAGE_SIZE); \ |
39d668e0 JR |
65 | __end_rodata_hpage_align = .; \ |
66 | __end_rodata_aligned = .; | |
74e08179 | 67 | |
2f7412ba TG |
68 | #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); |
69 | #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); | |
70 | ||
b3f0907c BS |
71 | /* |
72 | * This section contains data which will be mapped as decrypted. Memory | |
73 | * encryption operates on a page basis. Make this section PMD-aligned | |
74 | * to avoid splitting the pages while mapping the section early. | |
75 | * | |
76 | * Note: We use a separate section so that only this section gets | |
77 | * decrypted to avoid exposing more than we wish. | |
78 | */ | |
79 | #define BSS_DECRYPTED \ | |
80 | . = ALIGN(PMD_SIZE); \ | |
81 | __start_bss_decrypted = .; \ | |
419cbaf6 | 82 | __pi___start_bss_decrypted = .; \ |
b3f0907c BS |
83 | *(.bss..decrypted); \ |
84 | . = ALIGN(PAGE_SIZE); \ | |
85 | __start_bss_decrypted_unused = .; \ | |
86 | . = ALIGN(PMD_SIZE); \ | |
87 | __end_bss_decrypted = .; \ | |
419cbaf6 | 88 | __pi___end_bss_decrypted = .; \ |
b3f0907c | 89 | |
74e08179 SS |
90 | #else |
91 | ||
39d668e0 JR |
92 | #define X86_ALIGN_RODATA_BEGIN |
93 | #define X86_ALIGN_RODATA_END \ | |
94 | . = ALIGN(PAGE_SIZE); \ | |
95 | __end_rodata_aligned = .; | |
74e08179 | 96 | |
2f7412ba TG |
97 | #define ALIGN_ENTRY_TEXT_BEGIN |
98 | #define ALIGN_ENTRY_TEXT_END | |
b3f0907c | 99 | #define BSS_DECRYPTED |
2f7412ba | 100 | |
74e08179 | 101 | #endif |
cb33ff9e DW |
102 | #if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE) |
103 | #define KEXEC_RELOCATE_KERNEL \ | |
104 | . = ALIGN(0x100); \ | |
105 | __relocate_kernel_start = .; \ | |
eeed9150 NC |
106 | *(.text..relocate_kernel); \ |
107 | *(.data..relocate_kernel); \ | |
cb33ff9e DW |
108 | __relocate_kernel_end = .; |
109 | ||
110 | ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX_SIZE, | |
111 | "relocate_kernel code too large!") | |
112 | #else | |
113 | #define KEXEC_RELOCATE_KERNEL | |
114 | #endif | |
afb8095a SR |
115 | PHDRS { |
116 | text PT_LOAD FLAGS(5); /* R_E */ | |
5bd5a452 | 117 | data PT_LOAD FLAGS(6); /* RW_ */ |
afb8095a SR |
118 | note PT_NOTE FLAGS(0); /* ___ */ |
119 | } | |
17ce265d | 120 | |
444e0ae4 SR |
121 | SECTIONS |
122 | { | |
9b67ce2c | 123 | . = __START_KERNEL; |
444e0ae4 | 124 | #ifdef CONFIG_X86_32 |
142b9e6c | 125 | phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET); |
444e0ae4 | 126 | #else |
142b9e6c | 127 | phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET); |
444e0ae4 SR |
128 | #endif |
129 | ||
dfc20895 | 130 | /* Text and read-only data */ |
dfc20895 | 131 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
4ae59b91 | 132 | _text = .; |
419cbaf6 | 133 | __pi__text = .; |
e728f61c | 134 | _stext = .; |
24a9c543 | 135 | ALIGN_ENTRY_TEXT_BEGIN |
79cd2a11 | 136 | *(.text..__x86.rethunk_untrain) |
24a9c543 | 137 | ENTRY_TEXT |
fb3bd914 | 138 | |
a033eec9 | 139 | #ifdef CONFIG_MITIGATION_SRSO |
fb3bd914 | 140 | /* |
42be649d | 141 | * See the comment above srso_alias_untrain_ret()'s |
fb3bd914 BPA |
142 | * definition. |
143 | */ | |
42be649d | 144 | . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); |
79cd2a11 | 145 | *(.text..__x86.rethunk_safe) |
fb3bd914 | 146 | #endif |
24a9c543 | 147 | ALIGN_ENTRY_TEXT_END |
35350eb6 | 148 | |
35350eb6 AB |
149 | TEXT_TEXT |
150 | SCHED_TEXT | |
151 | LOCK_TEXT | |
152 | KPROBES_TEXT | |
153 | SOFTIRQENTRY_TEXT | |
154 | #ifdef CONFIG_MITIGATION_RETPOLINE | |
155 | *(.text..__x86.indirect_thunk) | |
156 | *(.text..__x86.return_thunk) | |
157 | #endif | |
158 | STATIC_CALL_TEXT | |
24a9c543 TG |
159 | *(.gnu.warning) |
160 | ||
65e71089 | 161 | } :text = 0xcccccccc |
392bef70 | 162 | |
a6a4ae9c AB |
163 | /* bootstrapping code */ |
164 | .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { | |
165 | HEAD_TEXT | |
166 | } :text = 0xcccccccc | |
167 | ||
b9076938 KC |
168 | /* End of text section, which should occupy whole number of pages */ |
169 | _etext = .; | |
5bd5a452 | 170 | . = ALIGN(PAGE_SIZE); |
b9076938 | 171 | |
39d668e0 | 172 | X86_ALIGN_RODATA_BEGIN |
c62e4320 | 173 | RO_DATA(PAGE_SIZE) |
39d668e0 | 174 | X86_ALIGN_RODATA_END |
448bc3ab | 175 | |
1f6397ba | 176 | /* Data */ |
1f6397ba | 177 | .data : AT(ADDR(.data) - LOAD_OFFSET) { |
1260866a CM |
178 | /* Start of data section */ |
179 | _sdata = .; | |
c62e4320 JB |
180 | |
181 | /* init_task */ | |
182 | INIT_TASK_DATA(THREAD_SIZE) | |
1f6397ba | 183 | |
2cb16181 | 184 | /* equivalent to task_pt_regs(&init_task) */ |
8f69cba0 | 185 | __top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE; |
2cb16181 | 186 | |
1f6397ba | 187 | #ifdef CONFIG_X86_32 |
c62e4320 JB |
188 | /* 32 bit has nosave before _edata */ |
189 | NOSAVE_DATA | |
1f6397ba SR |
190 | #endif |
191 | ||
c62e4320 | 192 | PAGE_ALIGNED_DATA(PAGE_SIZE) |
1f6397ba | 193 | |
972f9cdf BG |
194 | CACHE_HOT_DATA(L1_CACHE_BYTES) |
195 | ||
350f8f56 | 196 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) |
1f6397ba | 197 | |
c62e4320 JB |
198 | DATA_DATA |
199 | CONSTRUCTORS | |
cb33ff9e | 200 | KEXEC_RELOCATE_KERNEL |
c62e4320 JB |
201 | |
202 | /* rarely changed data like cpu maps */ | |
350f8f56 | 203 | READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) |
1f6397ba | 204 | |
1f6397ba SR |
205 | /* End of data section */ |
206 | _edata = .; | |
c62e4320 | 207 | } :data |
1f6397ba | 208 | |
b5effd38 | 209 | BUG_TABLE |
ff6f87e1 | 210 | |
ee9f8fce JP |
211 | ORC_UNWIND_TABLE |
212 | ||
c62e4320 JB |
213 | /* Init code and data - will be freed after init */ |
214 | . = ALIGN(PAGE_SIZE); | |
215 | .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { | |
216 | __init_begin = .; /* paired with __init_end */ | |
e58bdaa8 | 217 | } |
e58bdaa8 | 218 | |
123f3e1d | 219 | INIT_TEXT_SECTION(PAGE_SIZE) |
e58bdaa8 | 220 | |
337e4cc8 BP |
221 | /* |
222 | * Section for code used exclusively before alternatives are run. All | |
223 | * references to such code must be patched out by alternatives, normally | |
224 | * by using X86_FEATURE_ALWAYS CPU feature bit. | |
225 | * | |
226 | * See static_cpu_has() for an example. | |
227 | */ | |
228 | .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) { | |
229 | *(.altinstr_aux) | |
230 | } | |
231 | ||
123f3e1d | 232 | INIT_DATA_SECTION(16) |
e58bdaa8 SR |
233 | |
234 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { | |
235 | __x86_cpu_dev_start = .; | |
236 | *(.x86_cpu_dev.init) | |
237 | __x86_cpu_dev_end = .; | |
238 | } | |
239 | ||
66ac5013 DC |
240 | #ifdef CONFIG_X86_INTEL_MID |
241 | .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \ | |
242 | LOAD_OFFSET) { | |
243 | __x86_intel_mid_dev_start = .; | |
244 | *(.x86_intel_mid_dev.init) | |
245 | __x86_intel_mid_dev_end = .; | |
246 | } | |
247 | #endif | |
248 | ||
aefb2f2e | 249 | #ifdef CONFIG_MITIGATION_RETPOLINE |
134ab5bd PZ |
250 | /* |
251 | * List of instructions that call/jmp/jcc to retpoline thunks | |
252 | * __x86_indirect_thunk_*(). These instructions can be patched along | |
253 | * with alternatives, after which the section can be freed. | |
254 | */ | |
255 | . = ALIGN(8); | |
256 | .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) { | |
257 | __retpoline_sites = .; | |
258 | *(.retpoline_sites) | |
259 | __retpoline_sites_end = .; | |
260 | } | |
15e67227 PZ |
261 | |
262 | . = ALIGN(8); | |
263 | .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) { | |
264 | __return_sites = .; | |
265 | *(.return_sites) | |
266 | __return_sites_end = .; | |
267 | } | |
00abd384 PZ |
268 | |
269 | . = ALIGN(8); | |
270 | .call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) { | |
271 | __call_sites = .; | |
272 | *(.call_sites) | |
273 | __call_sites_end = .; | |
274 | } | |
134ab5bd PZ |
275 | #endif |
276 | ||
89bc853e PZ |
277 | #ifdef CONFIG_X86_KERNEL_IBT |
278 | . = ALIGN(8); | |
279 | .ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) { | |
280 | __ibt_endbr_seal = .; | |
281 | *(.ibt_endbr_seal) | |
282 | __ibt_endbr_seal_end = .; | |
283 | } | |
284 | #endif | |
285 | ||
931ab636 PZ |
286 | #ifdef CONFIG_FINEIBT |
287 | . = ALIGN(8); | |
288 | .cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) { | |
289 | __cfi_sites = .; | |
290 | *(.cfi_sites) | |
291 | __cfi_sites_end = .; | |
292 | } | |
293 | #endif | |
294 | ||
6f44d033 KRW |
295 | /* |
296 | * struct alt_inst entries. From the header (alternative.h): | |
297 | * "Alternative instructions for different CPU types or capabilities" | |
298 | * Think locking instructions on spinlocks. | |
299 | */ | |
ae618362 SR |
300 | . = ALIGN(8); |
301 | .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { | |
302 | __alt_instructions = .; | |
303 | *(.altinstructions) | |
304 | __alt_instructions_end = .; | |
305 | } | |
306 | ||
6f44d033 KRW |
307 | /* |
308 | * And here are the replacement instructions. The linker sticks | |
309 | * them as binary blobs. The .altinstructions has enough data to | |
310 | * get the address and the length of them to patch the kernel safely. | |
311 | */ | |
ae618362 SR |
312 | .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { |
313 | *(.altinstr_replacement) | |
314 | } | |
315 | ||
107e0e0c SS |
316 | . = ALIGN(8); |
317 | .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) { | |
318 | __apicdrivers = .; | |
319 | *(.apicdrivers); | |
320 | __apicdrivers_end = .; | |
321 | } | |
322 | ||
7ac41ccf | 323 | . = ALIGN(8); |
bf6a5741 | 324 | /* |
6f8f0dc9 AS |
325 | * .exit.text is discarded at runtime, not link time, to deal with |
326 | * references from .altinstructions | |
bf6a5741 SR |
327 | */ |
328 | .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { | |
329 | EXIT_TEXT | |
330 | } | |
331 | ||
332 | .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { | |
333 | EXIT_DATA | |
334 | } | |
335 | ||
972f9cdf | 336 | PERCPU_SECTION(L1_CACHE_BYTES) |
6d536cad | 337 | ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large") |
9d16e783 | 338 | |
92a10d38 | 339 | RUNTIME_CONST_VARIABLES |
86e6b154 | 340 | RUNTIME_CONST(ptr, USER_PTR_MAX) |
e3c92e81 | 341 | |
9d16e783 | 342 | . = ALIGN(PAGE_SIZE); |
fd073194 | 343 | |
9d16e783 | 344 | /* freed after init ends here */ |
fd073194 IM |
345 | .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) { |
346 | __init_end = .; | |
347 | } | |
9d16e783 | 348 | |
c62e4320 JB |
349 | /* |
350 | * smp_locks might be freed after init | |
351 | * start/end must be page aligned | |
352 | */ | |
353 | . = ALIGN(PAGE_SIZE); | |
354 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | |
355 | __smp_locks = .; | |
356 | *(.smp_locks) | |
c62e4320 | 357 | . = ALIGN(PAGE_SIZE); |
596b711e | 358 | __smp_locks_end = .; |
c62e4320 JB |
359 | } |
360 | ||
9d16e783 SR |
361 | #ifdef CONFIG_X86_64 |
362 | .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { | |
c62e4320 JB |
363 | NOSAVE_DATA |
364 | } | |
9d16e783 SR |
365 | #endif |
366 | ||
091e52c3 SR |
367 | /* BSS */ |
368 | . = ALIGN(PAGE_SIZE); | |
369 | .bss : AT(ADDR(.bss) - LOAD_OFFSET) { | |
370 | __bss_start = .; | |
7c74df07 | 371 | *(.bss..page_aligned) |
de2b41be | 372 | . = ALIGN(PAGE_SIZE); |
6a03469a | 373 | *(BSS_MAIN) |
b3f0907c | 374 | BSS_DECRYPTED |
5bd5a452 | 375 | . = ALIGN(PAGE_SIZE); |
091e52c3 SR |
376 | __bss_stop = .; |
377 | } | |
9d16e783 | 378 | |
c603a309 TL |
379 | /* |
380 | * The memory occupied from _text to here, __end_of_kernel_reserve, is | |
381 | * automatically reserved in setup_arch(). Anything after here must be | |
382 | * explicitly reserved using memblock_reserve() or it will be discarded | |
383 | * and treated as available memory. | |
384 | */ | |
385 | __end_of_kernel_reserve = .; | |
386 | ||
091e52c3 | 387 | . = ALIGN(PAGE_SIZE); |
7e09ac27 | 388 | .brk : AT(ADDR(.brk) - LOAD_OFFSET) { |
091e52c3 SR |
389 | __brk_base = .; |
390 | . += 64 * 1024; /* 64k alignment slop space */ | |
e32683c6 | 391 | *(.bss..brk) /* areas brk users have reserved */ |
091e52c3 SR |
392 | __brk_limit = .; |
393 | } | |
394 | ||
974f221c | 395 | . = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ |
873b5271 | 396 | _end = .; |
419cbaf6 | 397 | __pi__end = .; |
091e52c3 | 398 | |
e1bfa873 TL |
399 | #ifdef CONFIG_AMD_MEM_ENCRYPT |
400 | /* | |
401 | * Early scratch/workarea section: Lives outside of the kernel proper | |
402 | * (_text - _end). | |
403 | * | |
404 | * Resides after _end because even though the .brk section is after | |
405 | * __end_of_kernel_reserve, the .brk section is later reserved as a | |
406 | * part of the kernel. Since it is located after __end_of_kernel_reserve | |
407 | * it will be discarded and become part of the available memory. As | |
408 | * such, it can only be used by very early boot code and must not be | |
409 | * needed afterwards. | |
410 | * | |
411 | * Currently used by SME for performing in-place encryption of the | |
412 | * kernel during boot. Resides on a 2MB boundary to simplify the | |
413 | * pagetable setup used for SME in-place encryption. | |
414 | */ | |
415 | . = ALIGN(HPAGE_SIZE); | |
416 | .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) { | |
417 | __init_scratch_begin = .; | |
418 | *(.init.scratch) | |
419 | . = ALIGN(HPAGE_SIZE); | |
420 | __init_scratch_end = .; | |
421 | } | |
422 | #endif | |
423 | ||
a06cc94f C |
424 | STABS_DEBUG |
425 | DWARF_DEBUG | |
d5dc9583 RX |
426 | #ifdef CONFIG_PROPELLER_CLANG |
427 | .llvm_bb_addr_map : { *(.llvm_bb_addr_map) } | |
428 | #endif | |
429 | ||
c604abc3 | 430 | ELF_DETAILS |
023bf6f1 | 431 | |
023bf6f1 | 432 | DISCARDS |
444e0ae4 | 433 | |
815d6807 KC |
434 | /* |
435 | * Make sure that the .got.plt is either completely empty or it | |
436 | * contains only the lazy dispatch entries. | |
437 | */ | |
438 | .got.plt (INFO) : { *(.got.plt) } | |
439 | ASSERT(SIZEOF(.got.plt) == 0 || | |
440 | #ifdef CONFIG_X86_64 | |
441 | SIZEOF(.got.plt) == 0x18, | |
442 | #else | |
443 | SIZEOF(.got.plt) == 0xc, | |
444 | #endif | |
445 | "Unexpected GOT/PLT entries detected!") | |
5354e845 KC |
446 | |
447 | /* | |
448 | * Sections that should stay zero sized, which is safer to | |
449 | * explicitly check instead of blindly discarding. | |
450 | */ | |
451 | .got : { | |
452 | *(.got) *(.igot.*) | |
453 | } | |
454 | ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!") | |
455 | ||
456 | .plt : { | |
457 | *(.plt) *(.plt.*) *(.iplt) | |
458 | } | |
459 | ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") | |
460 | ||
461 | .rel.dyn : { | |
462 | *(.rel.*) *(.rel_*) | |
463 | } | |
464 | ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!") | |
465 | ||
466 | .rela.dyn : { | |
467 | *(.rela.*) *(.rela_*) | |
468 | } | |
469 | ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") | |
815d6807 | 470 | } |
17ce265d | 471 | |
a5912f6b | 472 | /* |
00a241f5 GR |
473 | * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause |
474 | * this. Let's assume that nobody will be running a COMPILE_TEST kernel and | |
475 | * let's assert that fuller build coverage is more valuable than being able to | |
476 | * run a COMPILE_TEST kernel. | |
477 | */ | |
478 | #ifndef CONFIG_COMPILE_TEST | |
479 | /* | |
480 | * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility: | |
a5912f6b | 481 | */ |
d2ba8b21 PA |
482 | . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), |
483 | "kernel image bigger than KERNEL_IMAGE_SIZE"); | |
00a241f5 | 484 | #endif |
ea3186b9 | 485 | |
577c134d AB |
486 | /* needed for Clang - see arch/x86/entry/entry.S */ |
487 | PROVIDE(__ref_stack_chk_guard = __stack_chk_guard); | |
488 | ||
ea3186b9 | 489 | #ifdef CONFIG_X86_64 |
17ce265d | 490 | |
ac61d439 | 491 | #ifdef CONFIG_MITIGATION_UNRET_ENTRY |
d025b7ba | 492 | . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); |
fb3bd914 BPA |
493 | #endif |
494 | ||
a033eec9 | 495 | #ifdef CONFIG_MITIGATION_SRSO |
34a3cae7 | 496 | . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); |
fb3bd914 | 497 | /* |
cbe8ded4 ND |
498 | * GNU ld cannot do XOR until 2.41. |
499 | * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 | |
500 | * | |
501 | * LLVM lld cannot do XOR until lld-17. | |
502 | * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb | |
503 | * | |
504 | * Instead do: (A | B) - (A & B) in order to compute the XOR | |
fb3bd914 BPA |
505 | * of the two function addresses: |
506 | */ | |
42be649d PZ |
507 | . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - |
508 | (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), | |
fb3bd914 | 509 | "SRSO function pair won't alias"); |
f220125b BPA |
510 | #endif |
511 | ||
8754e67a PG |
512 | #if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) |
513 | . = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline"); | |
514 | . = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart"); | |
515 | . = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array"); | |
516 | #endif | |
517 | ||
a75bf27f PG |
518 | #if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) |
519 | . = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline"); | |
520 | #endif | |
521 | ||
ea3186b9 | 522 | #endif /* CONFIG_X86_64 */ |
223abe96 AB |
523 | |
524 | /* | |
525 | * The symbols below are referenced using relative relocations in the | |
526 | * respective ELF notes. This produces build time constants that the | |
527 | * linker will never mark as relocatable. (Using just ABSOLUTE() is not | |
528 | * sufficient for that). | |
529 | */ | |
223abe96 AB |
530 | #ifdef CONFIG_XEN_PV |
531 | xen_elfnote_entry_value = | |
532 | ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen); | |
533 | #endif | |
223abe96 AB |
534 | #ifdef CONFIG_PVH |
535 | xen_elfnote_phys32_entry_value = | |
536 | ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET); | |
537 | #endif |