Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/sh/kernel/cpu/init.c | |
3 | * | |
4 | * CPU init code | |
5 | * | |
ffe1b4e9 | 6 | * Copyright (C) 2002 - 2007 Paul Mundt |
b638d0b9 | 7 | * Copyright (C) 2003 Richard Curnow |
1da177e4 LT |
8 | * |
9 | * This file is subject to the terms and conditions of the GNU General Public | |
10 | * License. See the file "COPYING" in the main directory of this archive | |
11 | * for more details. | |
12 | */ | |
13 | #include <linux/init.h> | |
14 | #include <linux/kernel.h> | |
aec5e0e1 PM |
15 | #include <linux/mm.h> |
16 | #include <asm/mmu_context.h> | |
1da177e4 LT |
17 | #include <asm/processor.h> |
18 | #include <asm/uaccess.h> | |
f3c25758 | 19 | #include <asm/page.h> |
1da177e4 LT |
20 | #include <asm/system.h> |
21 | #include <asm/cacheflush.h> | |
22 | #include <asm/cache.h> | |
23 | #include <asm/io.h> | |
357d5946 | 24 | #include <asm/ubc.h> |
1da177e4 LT |
25 | |
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 *
 * onchip_setup(x) expands to:
 *   - an __initdata flag "x_disabled", and
 *   - a boot-option handler registered for "no<x>" (e.g. "nofpu",
 *     "nodsp") that sets the flag.
 *
 * Returning 1 from the handler tells the __setup machinery the
 * option was recognized and consumed.
 */
#define onchip_setup(x)				\
static int x##_disabled __initdata = 0;		\
						\
static int __init x##_setup(char *opts)		\
{						\
	x##_disabled = 1;			\
	return 1;				\
}						\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
/* CPUOPM (CPU operation mode) register address and its RABD bit. */
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

/*
 * Clear the RABD bit in CPUOPM -- presumably this enables
 * speculative instruction fetch (NOTE(review): confirm bit
 * semantics against the CPU hardware manual).
 *
 * The dummy read-back plus ctrl_barrier() appear to be there to
 * make sure the register update has taken effect before returning.
 */
static void __init speculative_execution_init(void)
{
	/* Clear RABD */
	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update */
	(void)ctrl_inl(CPUOPM);
	ctrl_barrier();
}
#else
/* No-op when speculative execution support is not configured. */
#define speculative_execution_init()	do { } while (0)
#endif

/*
 * Generic first-level cache init
 *
 * Derives the remaining per-way I/D cache geometry from the values
 * probed by detect_cpu_and_cache_system(), writes back any dirty
 * lines a boot loader may have left in the cache, and finally
 * programs CCR with the kernel's cache configuration.
 */
static void __init cache_init(void)
{
	unsigned long ccr, flags;

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
					     current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
					   current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
					     current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
					   current_cpu_data.dcache.linesz;

	/*
	 * NOTE(review): the jump to the P2 (uncached) segment before
	 * touching CCR looks mandatory -- CCR is presumably unsafe to
	 * modify while running cached; confirm against the CPU manual.
	 */
	jump_to_P2();
	ccr = ctrl_inl(CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it. There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety. As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		/* sets -> bytes covered per way */
		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		/*
		 * Walk every line of every way through the OC address
		 * array, storing 0 to each entry to purge the line.
		 */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				ctrl_outl(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	ctrl_outl(flags, CCR);
	back_to_P1();
}

#ifdef CONFIG_SH_DSP
/*
 * Clear the DSP bit in the status register so subsequent code runs
 * with the DSP context disabled.
 */
static void __init release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"	/* sr = SR */
		"and\t%1, %0\n\t"	/* sr &= ~SR_DSP */
		"ldc\t%0, sr\n\t"	/* SR = sr */
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

/*
 * Probe for an on-chip DSP by attempting to set SR.DSP and reading
 * the bit back: if it sticks, the CPU has a DSP and CPU_HAS_DSP is
 * recorded in the CPU flags. The bit is cleared again afterwards.
 */
static void __init dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"		/* settle before re-reading SR */
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#endif /* CONFIG_SH_DSP */

/**
 * sh_cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the boot
 * CPU prior to calling start_kernel(). For SMP, a combination of this and
 * start_secondary() will bring up each processor to a ready state prior
 * to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up the
 * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
 * hit (and subsequently platform_setup()) things like determining the
 * CPU subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in detect_cpu_and_cache_system().
 */
asmlinkage void __init sh_cpu_init(void)
{
	/* First, probe the CPU */
	detect_cpu_and_cache_system();

	/* Probing must have filled in a real subtype or we cannot proceed. */
	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* Init the cache */
	cache_init();

	/*
	 * Align shared mappings to the D-cache way size (at least one
	 * page) -- NOTE(review): presumably to avoid cache aliasing in
	 * a virtually indexed cache; confirm against mmap arch code.
	 */
	shm_align_mask = max_t(unsigned long,
			       current_cpu_data.dcache.way_size - 1,
			       PAGE_SIZE - 1);

	/* Disable the FPU */
	if (fpu_disabled) {
		printk("FPU Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_FPU;
		disable_fpu();
	}

	/* FPU initialization */
	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
		clear_thread_flag(TIF_USEDFPU);
		clear_used_math();
	}

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

#ifdef CONFIG_SH_DSP
	/* Probe for DSP */
	dsp_init();

	/* Disable the DSP (must happen after probing set CPU_HAS_DSP) */
	if (dsp_disabled) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
		release_dsp();
	}
#endif

	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
	 * we wake it up and hope that all is well.
	 */
	ubc_wakeup();
	speculative_execution_init();
}