/*
 * File:         arch/blackfin/kernel/vmlinux.lds.S
 * Based on:     none - original work
 * Author:
 *
 * Created:      Tue Sep 21 2004
 * Description:  Master linker script for blackfin architecture
 *
 * Modified:
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

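/* Blackfin C symbols carry a leading underscore, so have the generic
 * vmlinux.lds.h macros emit the underscored names.
 */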
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
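/* Blackfin is little-endian, so the 32-bit jiffies can simply alias the
 * low word of jiffies_64.
 */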
_jiffies = _jiffies_64;

SECTIONS
{
        . = CONFIG_BOOT_LOAD;
        /* None of the text, ro_data, or bss sections needs to be aligned,
         * so pack them back to back.
         */
        .text :
        {
                __text = .;
                _text = .;
                __stext = .;
                TEXT_TEXT
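                /* With CONFIG_SCHEDULE_L1 the scheduler is placed in L1
                 * instruction SRAM instead (see .text_l1 below).
                 */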
#ifndef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                LOCK_TEXT
                IRQENTRY_TEXT
                KPROBES_TEXT
                *(.text.*)
                *(.fixup)

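                /* If there is no L1 instruction SRAM (L1_CODE_LENGTH is 0),
                 * keep .l1.text in the regular text section.
                 */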
#if !L1_CODE_LENGTH
                *(.l1.text)
#endif

                . = ALIGN(16);
                ___start___ex_table = .;
                *(__ex_table)
                ___stop___ex_table = .;

                __etext = .;
        }

        NOTES

        /* Just in case the first read-only access is a 32-bit one */
        RO_DATA(4)

        .bss :
        {
                . = ALIGN(4);
                ___bss_start = .;
                *(.bss .bss.*)
                *(COMMON)
#if !L1_DATA_A_LENGTH
                *(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.bss.B)
#endif
                . = ALIGN(4);
                ___bss_stop = .;
        }

        .data :
        {
                __sdata = .;
                /* This gets done first, so the glob doesn't suck it in */
                . = ALIGN(32);
                *(.data.cacheline_aligned)

#if !L1_DATA_A_LENGTH
                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)
                *(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
                *(.l1.data.B)
#endif
#if !L2_LENGTH
                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)
                *(.l2.data)
#endif

                DATA_DATA
                CONSTRUCTORS

                /* make sure the init_task is aligned to the
                 * kernel thread size so we can locate the kernel
                 * stack properly and quickly.
                 */
                . = ALIGN(THREAD_SIZE);
                *(.init_task.data)

                __edata = .;
        }

        /* The init section should be last, so when we free it, it goes into
         * the general memory pool and (hopefully) decreases fragmentation
         * a tiny bit.  The init section has a _requirement_ that it be
         * PAGE_SIZE aligned.
         */
        . = ALIGN(PAGE_SIZE);
        ___init_begin = .;

        .init.text :
        {
                . = ALIGN(PAGE_SIZE);
                __sinittext = .;
                INIT_TEXT
                __einittext = .;
        }
        .init.data :
        {
                . = ALIGN(16);
                INIT_DATA
        }
        .init.setup :
        {
                . = ALIGN(16);
                ___setup_start = .;
                *(.init.setup)
                ___setup_end = .;
        }
        .initcall.init :
        {
                ___initcall_start = .;
                INITCALLS
                ___initcall_end = .;
        }
        .con_initcall.init :
        {
                ___con_initcall_start = .;
                *(.con_initcall.init)
                ___con_initcall_end = .;
        }
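        /* 4-byte-aligned per-CPU data */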
        PERCPU(4)
        SECURITY_INIT

        /* we have to discard exit text and such at runtime, not link time, to
         * handle embedded cross-section references (alt instructions, bug
         * table, eh_frame, etc...)
         */
        .exit.text :
        {
                EXIT_TEXT
        }
        .exit.data :
        {
                EXIT_DATA
        }

        .init.ramfs :
        {
                . = ALIGN(4);
                ___initramfs_start = .;
                *(.init.ramfs)
                . = ALIGN(4);
                ___initramfs_end = .;
        }

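        /* The sections below are linked at their on-chip L1 SRAM addresses
         * (VMA) but stored in the kernel image right after .init.ramfs (LMA)
         * and copied into L1 during early boot.
         */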
        __l1_lma_start = .;

        .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
        {
                . = ALIGN(4);
                __stext_l1 = .;
                *(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
                SCHED_TEXT
#endif
                . = ALIGN(4);
                __etext_l1 = .;
        }
        ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")

        .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
        {
                . = ALIGN(4);
                __sdata_l1 = .;
                *(.l1.data)
                __edata_l1 = .;

                . = ALIGN(32);
                *(.data_l1.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l1 = .;
                *(.l1.bss)
                . = ALIGN(4);
                __ebss_l1 = .;
        }
        ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")

        .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
        {
                . = ALIGN(4);
                __sdata_b_l1 = .;
                *(.l1.data.B)
                __edata_b_l1 = .;

                . = ALIGN(4);
                __sbss_b_l1 = .;
                *(.l1.bss.B)
                . = ALIGN(4);
                __ebss_b_l1 = .;
        }
        ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")

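        /* Same arrangement for on-chip L2 SRAM: linked at L2_START, loaded
         * right after the L1 data B section.
         */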
        __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);

        .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
        {
                . = ALIGN(4);
                __stext_l2 = .;
                *(.l2.text)
                . = ALIGN(4);
                __etext_l2 = .;

                . = ALIGN(4);
                __sdata_l2 = .;
                *(.l2.data)
                __edata_l2 = .;

                . = ALIGN(32);
                *(.data_l2.cacheline_aligned)

                . = ALIGN(4);
                __sbss_l2 = .;
                *(.l2.bss)
                . = ALIGN(4);
                __ebss_l2 = .;
        }
        ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")

        /* Force trailing alignment of our init section so that when we
         * free our init memory, we don't leave behind a partial page.
         */
        . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
        . = ALIGN(PAGE_SIZE);
        ___init_end = .;

        __end = .;

        STABS_DEBUG

        DWARF_DEBUG

        DISCARDS
}