/*
 * arch/sh/kernel/head_64.S
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <asm/cpu/registers.h>
#include <asm/cpu/mmu_context.h>
#include <asm/thread_info.h>

/*
 * MMU defines: TLB boundaries.
 */

#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP	TLB_STEP

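/*
 * Note: each range runs from the first fixed entry through the last
 * variable (unrestricted) entry inclusive, in TLB_STEP increments; the
 * clear_ITLB/clear_DTLB loops in _stext below walk exactly this range
 * before the first entry is reprogrammed with the initial 512Mb mapping.
 */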
/* Safety check: CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif

/*
 * MMU defines: Fixed TLBs.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)

#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

#define MMUDR_CACHED_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L	(0x000000000000015a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */

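/*
 * Note: the *_H values form the PTEH half of each entry (enable/shared
 * bits, ASID 0 and the effective address) and the *_L values the PTEL
 * half (page size, cache policy, protection and the physical address);
 * _stext below writes them with putcfg at indices 0 and 1 respectively.
 */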
#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
#endif
#define ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */

#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL	OCCR0_OFF			/* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	/* D-cache: on, */
							/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	/* D-cache: on, */
							/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif

#define OCCR1_INIT_VAL	OCCR1_NOLOCK			/* No locking */

	.section	.empty_zero_page, "aw"
	.global empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
	.long	0

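	/*
	 * Note (assumption): as on 32-bit SH, this page appears to double as
	 * the boot parameter block, with the .long fields above read back by
	 * the kernel at fixed offsets from empty_zero_page.
	 */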
	.text
	.balign 4096,0,4096

	.section	.data, "aw"
	.balign	PAGE_SIZE

	.global	mmu_pdtp_cache
mmu_pdtp_cache:
	.space PAGE_SIZE, 0

	.global	empty_bad_page
empty_bad_page:
	.space PAGE_SIZE, 0

	.global	empty_bad_pte_table
empty_bad_pte_table:
	.space PAGE_SIZE, 0

	.global	fpu_in_use
fpu_in_use:	.quad	0


	.section	.text.head, "ax"
	.balign L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . Reset state:
 *	. SR.FD    = 1		(FPU disabled)
 *	. SR.BL    = 1		(Exceptions disabled)
 *	. SR.MD    = 1		(Privileged Mode)
 *	. SR.MMU   = 0		(MMU Disabled)
 *	. SR.CD    = 0		(CTC User Visible)
 *	. SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all left to later Linux initialization steps.
 * . initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared for the (hopeless) case that start_kernel() returns.
 */
	.global _stext
_stext:
	/*
	 * Prevent speculative fetch on device memory due to
	 * uninitialized target registers.
	 */
	ptabs/u	ZERO, tr0
	ptabs/u	ZERO, tr1
	ptabs/u	ZERO, tr2
	ptabs/u	ZERO, tr3
	ptabs/u	ZERO, tr4
	ptabs/u	ZERO, tr5
	ptabs/u	ZERO, tr6
	ptabs/u	ZERO, tr7
	synci

	/*
	 * Read/Set CPU state. After this block:
	 * r29 = Initial SR
	 */
	getcon	SR, r29
	movi	SR_HARMLESS, r20
	putcon	r20, SR

	/*
	 * Initialize EMI/LMI. To Be Done.
	 */

	/*
	 * CPU detection and/or endianness settings (?). To Be Done.
	 * Pure PIC code here, please! Just save state into r30.
	 * After this block:
	 * r30 = CPU type/Platform Endianness
	 */

	/*
	 * Set initial TLB entries for cached and uncached regions.
	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't!
	 */
	/* Clear ITLBs */
	pta	clear_ITLB, tr1
	movi	MMUIR_FIRST, r21
	movi	MMUIR_END, r22
clear_ITLB:
	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
	addi	r21, MMUIR_STEP, r21
	bne	r21, r22, tr1

	/* Clear DTLBs */
	pta	clear_DTLB, tr1
	movi	MMUDR_FIRST, r21
	movi	MMUDR_END, r22
clear_DTLB:
	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
	addi	r21, MMUDR_STEP, r21
	bne	r21, r22, tr1

	/* Map one big (512Mb) page for ITLB */
	movi	MMUIR_FIRST, r21
	movi	MMUIR_TEXT_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
	movi	MMUIR_TEXT_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */

	/* Map one big CACHED (512Mb) page for DTLB */
	movi	MMUDR_FIRST, r21
	movi	MMUDR_CACHED_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
	movi	MMUDR_CACHED_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */

#ifdef CONFIG_EARLY_PRINTK
	/*
	 * Setup a DTLB translation for SCIF phys.
	 */
	addi	r21, MMUDR_STEP, r21
	movi	0x0a03, r22		/* SCIF phys */
	shori	0x0148, r22
	putcfg	r21, 1, r22		/* PTEL first */
	movi	0xfa03, r22		/* 0xfa030000, fixed SCIF virt */
	shori	0x0003, r22
	putcfg	r21, 0, r22		/* PTEH last */
#endif
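	/*
	 * Note: each movi/shori pair above composes a 32-bit constant
	 * (shori shifts the register left by 16 and ORs in the immediate),
	 * giving PTEL = 0x0a030148 and PTEH = 0xfa030003 for the SCIF page.
	 */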

	/*
	 * Set cache behaviours.
	 */
	/* ICache */
	movi	ICCR_BASE, r21
	movi	ICCR0_INIT_VAL, r22
	movi	ICCR1_INIT_VAL, r23
	putcfg	r21, ICCR_REG0, r22
	putcfg	r21, ICCR_REG1, r23

	/* OCache */
	movi	OCCR_BASE, r21
	movi	OCCR0_INIT_VAL, r22
	movi	OCCR1_INIT_VAL, r23
	putcfg	r21, OCCR_REG0, r22
	putcfg	r21, OCCR_REG1, r23

	/*
	 * Enable Caches and MMU. Do the first non-PIC jump.
	 * Now head.S global variables, constants and externs
	 * can be used.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_MMU, r22
	or	r21, r22, r21
	putcon	r21, SSR
	movi	hyperspace, r22
	ori	r22, 1, r22		/* Make it SHmedia, not required but.. */
	putcon	r22, SPC
	synco
	rte				/* And now go into the hyperspace ... */
hyperspace:				/* ... that's the next instruction! */
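	/*
	 * Note: rte reloads SR from SSR and PC from SPC in one go, so the
	 * MMU/cache enable bits and the jump to hyperspace take effect
	 * together; from here on absolute (non-PIC) addresses are valid.
	 */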

	/*
	 * Set CPU to a consistent state.
	 * r31 = FPU support flag
	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
	 */
	movi	start_kernel, r32
	ori	r32, 1, r32

	ptabs	r32, tr0		/* r32 = _start_kernel address */
	pta/u	hopeless, tr1
	pta/u	hopeless, tr2
	pta/u	hopeless, tr3
	pta/u	hopeless, tr4
	pta/u	hopeless, tr5
	pta/u	hopeless, tr6
	pta/u	hopeless, tr7
	gettr	tr1, r28		/* r28 = hopeless address */

	/* Set initial stack pointer */
	movi	init_thread_union, SP
	putcon	SP, KCR0		/* Set current to init_task */
	movi	THREAD_SIZE, r22	/* Point to the end */
	add	SP, r22, SP
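	/*
	 * SP now points just past the top of init_thread_union; the kernel
	 * stack grows downwards from here.
	 */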

	/*
	 * Initialize FPU.
	 * Keep FPU flag in r31. After this block:
	 * r31 = FPU flag
	 */
	movi	fpu_in_use, r31		/* Temporary */

#ifdef CONFIG_SH_FPU
	getcon	SR, r21
	movi	SR_ENABLE_FPU, r22
	and	r21, r22, r22
	putcon	r22, SR			/* Try to enable */
	getcon	SR, r22
	xor	r21, r22, r21
	shlri	r21, 15, r21		/* Supposedly 0/1 */
	st.q	r31, 0, r21		/* Set fpu_in_use */
#else
	movi	0, r21
	st.q	r31, 0, r21		/* Set fpu_in_use */
#endif
	or	r21, ZERO, r31		/* Set FPU flag at last */
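	/*
	 * Note (assumption): SR.FD lives at bit 15, so the xor/shlri pair
	 * above yields 1 only if the FPU-disable bit actually changed when
	 * we tried to clear it, i.e. an FPU is present and now enabled.
	 */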

#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc. For these, the memory can be
   guaranteed to be all zero on boot anyway. */
	/*
	 * Clear bss
	 */
	pta	clear_quad, tr1
	movi	__bss_start, r22
	movi	_end, r23
clear_quad:
	st.q	r22, 0, ZERO
	addi	r22, 8, r22
	bne	r22, r23, tr1		/* Both quad aligned, see vmlinux.lds.S */
#endif
	pta/u	hopeless, tr1

	/* Say bye to head.S but be prepared to wrongly get back ... */
	blink	tr0, LINK

	/* If we ever get back here through LINK/tr1-tr7 */
	pta/u	hopeless, tr7

hopeless:
	/*
	 * Something's badly wrong here. Loop endlessly,
	 * there's nothing more we can do about it.
	 *
	 * Note on hopeless: it can be jumped to either before or after
	 * the jump into hyperspace. The only requirement is that it is
	 * reached via a PIC call (PTA) before, and either way (PTA/PTABS)
	 * after. Given the virtual-to-physical mapping, a simulator or
	 * emulator can easily tell which side we came from just by
	 * looking at the hopeless (PC) address.
	 *
	 * For debugging purposes:
	 * (r28) hopeless/loop address
	 * (r29) Original SR
	 * (r30) CPU type/Platform endianness
	 * (r31) FPU Support
	 * (r32) _start_kernel address
	 */
	blink	tr7, ZERO