/* arch/arm/kernel/vmlinux-xip.lds.S */
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA

#include <linux/sizes.h>

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>

#include "vmlinux.lds.h"

OUTPUT_ARCH(arm)
ENTRY(stext)

/* jiffies is the low word of jiffies_64; on big-endian that is offset 4 */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name. There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		ARM_DISCARD
		*(.alt.smp.init)
		*(.pv_table)
	}

	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
	_xiprom = .;			/* XIP ROM area to be mapped */

	.head.text : {
		_text = .;
		HEAD_TEXT
	}

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
		ARM_TEXT
	}

	RO_DATA(PAGE_SIZE)

	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		ARM_MMU_KEEP(*(__ex_table))
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	ARM_UNWIND_SECTIONS
#endif

	_etext = .;			/* End of text and rodata section */

	ARM_VECTORS
	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
	.init.rodata : {
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
	}

#ifdef CONFIG_ARM_MPU
	. = ALIGN(SZ_128K);
#endif
	_exiprom = .;			/* End of XIP ROM area */

/*
 * From this point, stuff is considered writable and will be copied to RAM
 */
	__data_loc = ALIGN(4);		/* location in file */
	. = PAGE_OFFSET + TEXT_OFFSET;	/* location in memory */
#undef LOAD_OFFSET
#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)

	. = ALIGN(THREAD_SIZE);
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
		*(.data..ro_after_init)
	}
	_edata = .;

	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_HAVE_TCM
	ARM_TCM
#endif

	/*
	 * End of copied data. We need a dummy section to get its LMA.
	 * Also located before final ALIGN() as trailing padding is not stored
	 * in the resulting binary file and useless to copy.
	 */
	.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
	_edata_loc = LOADADDR(.data.endmark);

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	BSS_SECTION(0, 0, 8)
#ifdef CONFIG_ARM_MPU
	. = ALIGN(PMSAv8_MINALIGN);
#endif
	_end = .;

	STABS_DEBUG
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
 * The .bss is used as a stack area for __inflate_kernel_data() whose stack
 * frame is 9568 bytes. Make sure it has extra room left.
 */
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif

#ifdef CONFIG_ARM_MPU
/*
 * Due to the PMSAv7 restriction on base address and size we have to
 * enforce minimal alignment restrictions. It was seen that a weaker
 * alignment restriction on _xiprom will likely force the XIP address
 * space to span multiple MPU regions, making it likely that we hit a
 * situation where the MPU region we are reprogramming while running
 * on it is replaced by something which doesn't cover the
 * reprogramming code itself, so as soon as we update the MPU settings
 * we'd immediately try to execute straight from the background
 * region, which is XN.
 * It seems that alignment to 1M should suit most users.
 * _exiprom is aligned to 1/8 of 1M so it can be covered by subregion
 * disable.
 */
ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
#endif