[PATCH] add EXPORT_SYMBOL_GPL_FUTURE()
include/asm-generic/vmlinux.lds.h
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

#define RODATA \
        . = ALIGN(4096); \
        __start_rodata = .; \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                *(.rodata) *(.rodata.*) \
                *(__vermagic)   /* Kernel version magic */ \
        } \
        \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                *(.rodata1) \
        } \
        \
        /* PCI quirks */ \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
        } \
        \
        /* RapidIO route ops */ \
        .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
                *(.rio_route_ops) \
                VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(__ksymtab) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(__ksymtab_gpl) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(__ksymtab_gpl_future) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(__kcrctab) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(__kcrctab_gpl) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(__kcrctab_gpl_future) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        } \
        __end_rodata = .; \
        . = ALIGN(4096); \
        \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                *(__param) \
                VMLINUX_SYMBOL(__stop___param) = .; \
        }
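
The __ksymtab* / __kcrctab* pairs above are filled in by the EXPORT_SYMBOL() family from <linux/module.h>, and the __param section by module_param(); kernel/module.c resolves imports by scanning the __start___*/__stop___* ranges this script defines. The new *_gpl_future sections hold symbols exported with EXPORT_SYMBOL_GPL_FUTURE(), i.e. symbols scheduled to become GPL-only, so non-GPL users can be warned before the switch happens. A minimal sketch of driver code feeding these sections (the foo_* names are hypothetical, not part of this patch):

/* Sketch only: foo_* names are illustrative. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int foo_debug;                   /* lands in the __param section */
module_param(foo_debug, int, 0644);

int foo_do_work(void)
{
        return foo_debug;
}
/* Emits a struct kernel_symbol into __ksymtab_gpl_future (plus, with
 * CONFIG_MODVERSIONS, a CRC entry into __kcrctab_gpl_future); the
 * linker script above gathers those between the __start/__stop markers. */
EXPORT_SYMBOL_GPL_FUTURE(foo_do_work);
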
#define SECURITY_INIT \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .; \
                *(.security_initcall.init) \
                VMLINUX_SYMBOL(__security_initcall_end) = .; \
        }
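
SECURITY_INIT is pulled into the architecture linker scripts so the security framework can locate its initcalls: entries are added with security_initcall() from <linux/init.h>, and the __security_initcall_start..__security_initcall_end range is walked at boot. A minimal sketch, assuming a hypothetical LSM named mylsm:

/* Sketch only: mylsm_init is a hypothetical security module init. */
#include <linux/init.h>

static int __init mylsm_init(void)
{
        /* register_security(&mylsm_ops) would go here */
        return 0;
}
/* Places a function pointer into .security_initcall.init, which
 * SECURITY_INIT collects between __security_initcall_start/_end. */
security_initcall(mylsm_init);
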
/* sched.text is aligned to the function alignment to ensure we get the same
 * addresses on the second ld pass when generating System.map. */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;
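
SCHED_TEXT exists so that scheduler entry points share one contiguous range: functions annotated __sched (from <linux/sched.h>) are placed in .sched.text, and the kernel uses __sched_text_start/__sched_text_end (e.g. in in_sched_functions()) to skip those frames when reporting where a task is blocked. A minimal sketch, assuming a hypothetical wait helper:

/* Sketch only: my_wait_for_event is illustrative. */
#include <linux/sched.h>

/* __sched puts the function into .sched.text, inside the
 * __sched_text_start/__sched_text_end window checked by get_wchan(). */
void __sched my_wait_for_event(void)
{
        schedule();
}
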
/* spinlock.text is aligned to the function alignment to ensure we get the same
 * addresses on the second ld pass when generating System.map. */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;
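
LOCK_TEXT plays the same role for the out-of-line locking primitives: they are built with the __lockfunc attribute (see <linux/spinlock.h>), which places them in .spinlock.text so in_lock_functions() can recognise them, e.g. when attributing profiler hits. A minimal sketch, assuming a hypothetical wrapper:

/* Sketch only: my_locked_update is illustrative. */
#include <linux/spinlock.h>

/* __lockfunc puts the body into .spinlock.text, i.e. inside the
 * __lock_text_start/__lock_text_end range tested by in_lock_functions(). */
void __lockfunc my_locked_update(spinlock_t *lock, int *counter)
{
        spin_lock(lock);
        (*counter)++;
        spin_unlock(lock);
}
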
#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;
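
KPROBES_TEXT gathers functions marked __kprobes (from <linux/kprobes.h>) into .kprobes.text; addresses inside __kprobes_text_start..__kprobes_text_end are rejected when registering a probe, so the kprobes machinery can never recursively probe itself. A minimal sketch, assuming a hypothetical helper:

/* Sketch only: my_unprobable_helper is illustrative. */
#include <linux/kprobes.h>

/* __kprobes moves the function into .kprobes.text, so attempts to
 * register a probe on it are refused by the kprobes core. */
int __kprobes my_unprobable_helper(int val)
{
        return val + 1;
}
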
/* DWARF debug sections.
   Symbols in the DWARF debugging sections are relative to
   the beginning of the section so we begin them at 0. */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info 0 : { *(.debug_info \
                .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }
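
These macros are consumed by each architecture's vmlinux.lds.S, which may define LOAD_OFFSET and VMLINUX_SYMBOL before including this header (otherwise both fall back to the defaults at the top). A heavily trimmed, hypothetical example of such a script; the addresses, entry point, and section layout are illustrative only:

/* Sketch only: not a real arch/.../vmlinux.lds.S */
#include <asm-generic/vmlinux.lds.h>

OUTPUT_ARCH(i386)
ENTRY(startup_32)
SECTIONS
{
        . = 0xC0000000 + 0x100000;

        .text : {
                *(.text)
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.warning)
        }

        RODATA                  /* everything the macro above collects */

        .data : { *(.data) }

        SECURITY_INIT

        /* ... init sections, bss, etc. elided ... */

        STABS_DEBUG
        DWARF_DEBUG
}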