/* SPDX-License-Identifier: GPL-2.0 */
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define RO_EXCEPTION_TABLE_ALIGN	0

#define RUNTIME_DISCARD_EXIT
#define SOFT_MASK_TABLE(align)						\
	. = ALIGN(align);						\
	__soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
		__start___soft_mask_table = .;				\
		KEEP(*(__soft_mask_table))				\
		__stop___soft_mask_table = .;				\
	}
#define RESTART_TABLE(align)						\
	. = ALIGN(align);						\
	__restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) {	\
		__start___restart_table = .;				\
		KEEP(*(__restart_table))				\
		__stop___restart_table = .;				\
	}
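
/*
 * Both table macros above bracket a KEEP()'d input section with a pair of
 * __start___foo/__stop___foo symbols so that C code can treat the table as
 * an array.  A minimal sketch of that consumer pattern follows; the entry
 * type, table name and helper are illustrative only, not the kernel's
 * actual declarations:
 *
 *	struct example_entry {
 *		unsigned long start_addr;
 *		unsigned long end_addr;
 *	};
 *
 *	extern struct example_entry __start___example_table[];
 *	extern struct example_entry __stop___example_table[];
 *
 *	static int example_table_contains(unsigned long addr)
 *	{
 *		struct example_entry *e;
 *
 *		for (e = __start___example_table;
 *		     e < __stop___example_table; e++)
 *			if (addr >= e->start_addr && addr < e->end_addr)
 *				return 1;
 *		return 0;
 *	}
 */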
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#define STRICT_ALIGN_SIZE	(1 << CONFIG_DATA_SHIFT)

#if STRICT_ALIGN_SIZE < PAGE_SIZE
#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT"
#endif
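
/*
 * Worked example of the check above: with 64K pages (PAGE_SHIFT = 16),
 * CONFIG_DATA_SHIFT must be at least 16 so that STRICT_ALIGN_SIZE =
 * 1 << CONFIG_DATA_SHIFT is at least PAGE_SIZE (64K).  A smaller shift
 * would let the ALIGN(STRICT_ALIGN_SIZE) boundary used later fall inside
 * a page, so read-only text and writable data could end up sharing a page
 * and per-page permissions could not be applied cleanly.
 */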
PHDRS {
	text PT_LOAD FLAGS(7); /* RWX */
	note PT_NOTE FLAGS(0);
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
#endif
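
/*
 * The timekeeping core only updates the 64-bit jiffies_64; the linker
 * aliases the legacy 32-bit jiffies symbol onto it.  64-bit kernels can
 * alias it directly, while 32-bit powerpc is big-endian, so the
 * least-significant word of jiffies_64 sits 4 bytes in, hence the "+ 4".
 */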
/*
 * Text, read only data and other permanent read-only sections
 */
	/*
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E_64
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# endif
#endif
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :text
#ifdef CONFIG_PPC64
	/*
	 * ALIGN(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
		KEEP(*(.linker_stub_catch));
#endif
#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
		*(.tramp.ftrace.text);
		/*
		 * -Os builds call FP save/restore functions. The powerpc64
		 * linker generates those on demand in the .sfpr section.
		 * .sfpr gets placed at the beginning of a group of input
		 * sections, which can break start-of-text offset if it is
		 * included with the main text sections, so put it by itself.
		 */
		*(.sfpr);
		*(.text.asan.* .text.tsan.*)
	} :text

	. = ALIGN(PAGE_SIZE);
	.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
	.data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
		*(.data.rel.ro .data.rel.ro.*)
	}
	.branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
		*(.branch_lt)
	}

#ifdef CONFIG_PPC32
	.got1 : AT(ADDR(.got1) - LOAD_OFFSET) {
	.got2 : AT(ADDR(.got2) - LOAD_OFFSET) {
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
	.plt : AT(ADDR(.plt) - LOAD_OFFSET) {
		/* XXX: is .plt (and .got.plt) required? */
#else /* CONFIG_PPC32 */
#ifndef CONFIG_PPC_KERNEL_PCREL
	.toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
		*(.toc1)
	}
#endif
	.got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
#ifdef CONFIG_PPC_KERNEL_PCREL
#ifdef CONFIG_PPC64_ELF_ABI_V1
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
		__start___uaccess_flush_fixup = .;
		*(__uaccess_flush_fixup)
		__stop___uaccess_flush_fixup = .;
	}

	. = ALIGN(8);
	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
		__start___entry_flush_fixup = .;
		*(__entry_flush_fixup)
		__stop___entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
		__start___scv_entry_flush_fixup = .;
		*(__scv_entry_flush_fixup)
		__stop___scv_entry_flush_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}
	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
	. = ALIGN(8);
	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
		__start___barrier_nospec_fixup = .;
		*(__barrier_nospec_fixup)
		__stop___barrier_nospec_fixup = .;
	}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
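
/*
 * None of the fixup sections above hold code; each one collects the
 * addresses of instruction sites that are patched exactly once at boot,
 * after the CPU's vulnerability and workaround state is known (see the
 * do_*_fixups() helpers in arch/powerpc/lib/feature-fixups.c, e.g.
 * do_rfi_flush_fixups() and do_barrier_nospec_fixups()).  The paired
 * __start/__stop symbols delimit each table just as SOFT_MASK_TABLE and
 * RESTART_TABLE do above.
 */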
#ifdef CONFIG_PPC_E500
	. = ALIGN(8);
	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
		__start__btb_flush_fixup = .;
		*(__btb_flush_fixup)
		__stop__btb_flush_fixup = .;
	}
#endif
	/*
	 * Various code relies on __init_begin being at the strict RWX boundary.
	 */
	. = ALIGN(STRICT_ALIGN_SIZE);
	__init_begin = .;
/*
 * Init sections discarded at runtime
 */
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		*(.tramp.ftrace.init);
		/*
		 * .init.text might be RO so we must ensure this section ends on
		 * a page boundary.
		 */
		. = ALIGN(PAGE_SIZE);
	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		__exittext_begin = .;
		EXIT_TEXT
		__exittext_end = .;
	}

	. = ALIGN(PAGE_SIZE);
	INIT_DATA_SECTION(16)

	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		KEEP(*(__ftr_fixup))
		__stop___ftr_fixup = .;
	}
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		KEEP(*(__mmu_ftr_fixup))
		__stop___mmu_ftr_fixup = .;
	}

	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		KEEP(*(__lwsync_fixup))
		__stop___lwsync_fixup = .;
	}

#ifdef CONFIG_PPC64
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		KEEP(*(__fw_ftr_fixup))
		__stop___fw_ftr_fixup = .;
	}
#endif
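
/*
 * These CPU/MMU/firmware feature tables are walked once during early boot
 * (apply_feature_fixups() in arch/powerpc/lib/feature-fixups.c) and the
 * kernel text is patched in place before the alternatives are needed.
 * Roughly, each entry records a feature mask/value plus the source and
 * alternative instruction ranges; a simplified sketch of the entry layout
 * and walk, not the exact kernel code:
 *
 *	struct fixup_entry {
 *		unsigned long	mask;
 *		unsigned long	value;
 *		long		start_off;
 *		long		end_off;
 *		long		alt_start_off;
 *		long		alt_end_off;
 *	};
 *
 *	static void walk_fixups(unsigned long value, void *start, void *end)
 *	{
 *		struct fixup_entry *fcur = start, *fend = end;
 *
 *		for (; fcur < fend; fcur++)
 *			patch_feature_section(value, fcur);
 *	}
 *
 * where patch_feature_section() compares value against the entry's
 * mask/value pair and applies or skips that entry's alternative code.
 */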
	PERCPU_SECTION(L1_CACHE_BYTES)
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		KEEP(*(.machine.desc))
		__machine_desc_end = . ;
	}
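
/*
 * Each define_machine() in the platform code drops a struct machdep_calls
 * into .machine.desc, and early boot walks the span between
 * __machine_desc_start and __machine_desc_end looking for the platform
 * whose probe() accepts the device tree.  A simplified sketch of that
 * probe loop (see probe_machine() in arch/powerpc/kernel/setup-common.c
 * for the real thing):
 *
 *	extern struct machdep_calls __machine_desc_start;
 *	extern struct machdep_calls __machine_desc_end;
 *
 *	static void pick_machine(void)
 *	{
 *		struct machdep_calls *m;
 *
 *		for (m = &__machine_desc_start; m < &__machine_desc_end; m++) {
 *			if (!m->probe || m->probe()) {
 *				ppc_md = *m;
 *				break;
 *			}
 *		}
 *	}
 */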
#ifdef CONFIG_RELOCATABLE
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
		__dynamic_symtab = .;
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif
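
/*
 * When the kernel is built CONFIG_RELOCATABLE it may boot at an address
 * other than the one it was linked at; the early relocation code walks
 * the R_PPC64_RELATIVE/R_PPC_RELATIVE entries kept here and patches the
 * affected absolute addresses before the rest of the kernel runs.
 * __rela_dyn_start marks where that table begins.
 */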
	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;
/*
 * And now the various read/write data
 */
	. = ALIGN(PAGE_SIZE);

	.data : AT(ADDR(.data) - LOAD_OFFSET) {
	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_ALIGN)
	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
	. = ALIGN(PAGE_SIZE);
/*
 * And finally the bss
 */
	. = ALIGN(PAGE_SIZE);
#ifndef CONFIG_RELOCATABLE