/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>
/*
 * Per-core saved CP0 CacheErr (dcache) value.  Read and cleared by
 * co_cache_error_call_notifiers(); presumably written by the low-level
 * cache-error exception vector before that runs -- confirm against
 * except_vec2_octeon.
 */
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
5b3b1688 DD |
33 | |
34 | /** | |
35 | * Octeon automatically flushes the dcache on tlb changes, so | |
36 | * from Linux's viewpoint it acts much like a physically | |
37 | * tagged cache. No flushing is needed | |
38 | * | |
39 | */ | |
40 | static void octeon_flush_data_cache_page(unsigned long addr) | |
41 | { | |
42 | /* Nothing to do */ | |
43 | } | |
44 | ||
/*
 * Invalidate the local core's icache using the MIPS SYNCI instruction.
 *
 * NOTE(review): the address operand is 0($0); this assumes that on
 * Octeon a single synci is sufficient to flush the whole local icache
 * regardless of the address -- confirm against the Octeon HRM.
 */
static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}
49 | ||
/*
 * Flush local I-cache for the specified range.
 *
 * @start: start of range (unused)
 * @end:   end of range (unused)
 *
 * The range arguments are ignored: the entire local icache is
 * invalidated, which is a superset of any requested range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}
58 | ||
/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	/* Order prior stores before the flush, then flush locally first. */
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	/* Pin to this CPU so smp_processor_id() stays valid below. */
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	/* The local core was already flushed above; skip it. */
	cpumask_clear_cpu(cpu, &mask);
	/* Ask every remaining affected core to flush via IPI. */
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}
94 | ||
95 | ||
/**
 * Called to flush the icache on all cores
 *
 * Passing NULL flushes the icache of every online core, not just
 * those associated with a particular mm.
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}
103 | ||
104 | ||
/**
 * Flush all memory associated with a memory context.
 *
 * @mm: Memory context to flush (unused)
 *
 * This is a no-op: as in the R4K implementation, CPUs without dcache
 * aliases have nothing to do here.
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/* Intentionally empty. */
}
118 | ||
119 | ||
/**
 * Flush a range of kernel addresses out of the icache
 *
 * @start: start of range (unused)
 * @end:   end of range (unused)
 *
 * The range is ignored; the full icache of every core is flushed.
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}
128 | ||
129 | ||
/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr: Address to flush
 *
 * NOTE(review): find_vma() conventionally requires the mm semaphore
 * held for read -- verify that all callers of this hook hold it.
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
}
143 | ||
144 | ||
/**
 * Flush a range out of a vma
 *
 * @vma:   VMA to flush
 * @start: start of range (unused)
 * @end:   end of range (unused)
 *
 * Only executable mappings need an icache flush; data-only VMAs are
 * handled by the hardware-coherent dcache.
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}
158 | ||
159 | ||
/**
 * Flush a specific page of a vma
 *
 * @vma:  VMA to flush page for
 * @page: Page to flush (unused)
 * @pfn:  page frame number (unused)
 *
 * As with the range variant, only executable mappings require an
 * icache flush.
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}
173 | ||
d9cdc901 RB |
174 | static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size) |
175 | { | |
176 | BUG(); | |
177 | } | |
5b3b1688 DD |
178 | |
/**
 * Probe Octeon's caches
 *
 * Decodes icache geometry from the CP0 Config1 register and fills in
 * hard-coded dcache geometry per CPU model, then derives waysize/sets
 * and logs the result on the boot CPU.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;

	config1 = read_c0_config1();
	switch (c->cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		/* Icache geometry is encoded in Config1 bit fields. */
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		/* Dcache geometry is fixed by the CPU model. */
		c->dcache.linesz = 128;
		if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/* Only the boot CPU prints the cache configuration. */
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
			  "%d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
			  "linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}
253 | ||
078a55fc | 254 | static void octeon_cache_error_setup(void) |
586016eb DD |
255 | { |
256 | extern char except_vec2_octeon; | |
257 | set_handler(0x100, &except_vec2_octeon, 0x80); | |
258 | } | |
5b3b1688 DD |
259 | |
/**
 * Setup the Octeon cache flush routines
 *
 * Probes the cache geometry, then points the generic MIPS cache
 * function pointers at the Octeon implementations and registers the
 * board cache-error handler hook.
 */
void octeon_cache_init(void)
{
	probe_octeon();

	/* No dcache aliasing, so only page-size alignment is needed. */
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_cache_sigtramp = octeon_flush_cache_sigtramp;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}
288 | ||
e1ced097 | 289 | /* |
5b3b1688 DD |
290 | * Handle a cache error exception |
291 | */ | |
f65aad41 RB |
292 | static RAW_NOTIFIER_HEAD(co_cache_error_chain); |
293 | ||
294 | int register_co_cache_error_notifier(struct notifier_block *nb) | |
5b3b1688 | 295 | { |
f65aad41 RB |
296 | return raw_notifier_chain_register(&co_cache_error_chain, nb); |
297 | } | |
298 | EXPORT_SYMBOL_GPL(register_co_cache_error_notifier); | |
5b3b1688 | 299 | |
f65aad41 RB |
300 | int unregister_co_cache_error_notifier(struct notifier_block *nb) |
301 | { | |
302 | return raw_notifier_chain_unregister(&co_cache_error_chain, nb); | |
303 | } | |
304 | EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier); | |
305 | ||
e1ced097 | 306 | static void co_cache_error_call_notifiers(unsigned long val) |
f65aad41 | 307 | { |
e1ced097 DD |
308 | int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL); |
309 | if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) { | |
310 | u64 dcache_err; | |
311 | unsigned long coreid = cvmx_get_core_num(); | |
312 | u64 icache_err = read_octeon_c0_icacheerr(); | |
313 | ||
314 | if (val) { | |
315 | dcache_err = cache_err_dcache[coreid]; | |
316 | cache_err_dcache[coreid] = 0; | |
317 | } else { | |
318 | dcache_err = read_octeon_c0_dcacheerr(); | |
319 | } | |
320 | ||
321 | pr_err("Core%lu: Cache error exception:\n", coreid); | |
322 | pr_err("cp0_errorepc == %lx\n", read_c0_errorepc()); | |
323 | if (icache_err & 1) { | |
324 | pr_err("CacheErr (Icache) == %llx\n", | |
325 | (unsigned long long)icache_err); | |
326 | write_octeon_c0_icacheerr(0); | |
327 | } | |
328 | if (dcache_err & 1) { | |
329 | pr_err("CacheErr (Dcache) == %llx\n", | |
330 | (unsigned long long)dcache_err); | |
331 | } | |
332 | } | |
5b3b1688 DD |
333 | } |
334 | ||
e1ced097 | 335 | /* |
1c1a90d8 | 336 | * Called when the the exception is recoverable |
5b3b1688 | 337 | */ |
e1ced097 | 338 | |
5b3b1688 DD |
339 | asmlinkage void cache_parity_error_octeon_recoverable(void) |
340 | { | |
f65aad41 | 341 | co_cache_error_call_notifiers(0); |
5b3b1688 DD |
342 | } |
343 | ||
/**
 * Called when the exception is not recoverable
 *
 * Notifiers still run (with val == 1, consuming the saved dcache
 * error), then the kernel panics since execution cannot continue.
 */

asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}