/*
 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - Unified Layer 2 Cache for Feroceon CPU Cores,
 *   Document ID MV-S104858-00, Rev. A, October 23 2007.
 */

#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/hardware/cache-feroceon-l2.h>

/*
 * Low-level cache maintenance operations.
 *
 * As well as the regular 'clean/invalidate/flush L2 cache line by
 * MVA' instructions, the Feroceon L2 cache controller also features
 * 'clean/invalidate L2 range by MVA' operations.
 *
 * Cache range operations are initiated by writing the start and
 * end addresses to successive cp15 registers, and process every
 * cache line whose first byte address lies in the inclusive range
 * [start:end].
 *
 * The cache range operations stall the CPU pipeline until completion.
 *
 * The range operations require two successive cp15 writes, in
 * between which we don't want to be preempted.
 */

static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Because range ops can't be done on physical addresses,
         * we simply install a temporary virtual mapping for the page;
         * it only needs to exist for the TLB lookup to succeed, so
         * there is no need to flush the untouched memory mapping
         * afterwards (note: a cache flush may still happen in some
         * circumstances, depending on the path taken in kunmap_atomic).
         */
        void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
        return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
        return __phys_to_virt(paddr);
#endif
}

static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
        kunmap_atomic((void *)vaddr);
#endif
}

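/* Clean (write back) one L2 cache line, identified by physical address. */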
static inline void l2_clean_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}

static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
        unsigned long va_start, va_end, flags;

        /*
         * Make sure 'start' and 'end' reference the same page, as
         * L2 is PIPT and range operations only do a TLB lookup on
         * the start address.
         */
        BUG_ON((start ^ end) >> PAGE_SHIFT);

        va_start = l2_get_va(start);
        va_end = va_start + (end - start);
        raw_local_irq_save(flags);
        __asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
                "mcr p15, 1, %1, c15, c9, 5"
                : : "r" (va_start), "r" (va_end));
        raw_local_irq_restore(flags);
        l2_put_va(va_start);
}

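/* Clean and invalidate one L2 cache line, identified by physical address. */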
static inline void l2_clean_inv_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}

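/* Invalidate one L2 cache line, identified by physical address. */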
static inline void l2_inv_pa(unsigned long addr)
{
        __asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}

static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
        unsigned long va_start, va_end, flags;

        /*
         * Make sure 'start' and 'end' reference the same page, as
         * L2 is PIPT and range operations only do a TLB lookup on
         * the start address.
         */
        BUG_ON((start ^ end) >> PAGE_SHIFT);

        va_start = l2_get_va(start);
        va_end = va_start + (end - start);
        raw_local_irq_save(flags);
        __asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
                "mcr p15, 1, %1, c15, c11, 5"
                : : "r" (va_start), "r" (va_end));
        raw_local_irq_restore(flags);
        l2_put_va(va_start);
}

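/* Invalidate the entire L2 cache. */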
static inline void l2_inv_all(void)
{
        __asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
}

/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
#define CACHE_LINE_SIZE		32
#define MAX_RANGE_SIZE		1024

static int l2_wt_override;

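/*
 * Given a cache-line-aligned [start, end) interval, return the end of
 * the largest sub-range starting at 'start' that a single hardware
 * range operation may cover: at most MAX_RANGE_SIZE bytes, and never
 * crossing a page boundary.
 */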
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
        unsigned long range_end;

        BUG_ON(start & (CACHE_LINE_SIZE - 1));
        BUG_ON(end & (CACHE_LINE_SIZE - 1));

        /*
         * Try to process all cache lines between 'start' and 'end'.
         */
        range_end = end;

        /*
         * Limit the number of cache lines processed at once,
         * since cache range operations stall the CPU pipeline
         * until completion.
         */
        if (range_end > start + MAX_RANGE_SIZE)
                range_end = start + MAX_RANGE_SIZE;

        /*
         * Cache range operations can't straddle a page boundary.
         */
        if (range_end > (start | (PAGE_SIZE - 1)) + 1)
                range_end = (start | (PAGE_SIZE - 1)) + 1;

        return range_end;
}

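/*
 * outer_cache invalidate: partially covered cache lines at either end
 * of the range are cleaned and invalidated, so that data sharing those
 * lines but outside the range is not lost, and all fully covered lines
 * are invalidated using range operations.
 */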
static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
        /*
         * Clean and invalidate partial first cache line.
         */
        if (start & (CACHE_LINE_SIZE - 1)) {
                l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
                start = (start | (CACHE_LINE_SIZE - 1)) + 1;
        }

        /*
         * Clean and invalidate partial last cache line.
         */
        if (start < end && end & (CACHE_LINE_SIZE - 1)) {
                l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
                end &= ~(CACHE_LINE_SIZE - 1);
        }

        /*
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < end) {
                unsigned long range_end = calc_range_end(start, end);
                l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
                start = range_end;
        }

        dsb();
}

static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
        /*
         * If L2 is forced to WT, the L2 will always be clean and we
         * don't need to do anything here.
         */
        if (!l2_wt_override) {
                start &= ~(CACHE_LINE_SIZE - 1);
                end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
                while (start != end) {
                        unsigned long range_end = calc_range_end(start, end);
                        l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
                        start = range_end;
                }
        }

        dsb();
}

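/*
 * outer_cache flush: clean each line and then invalidate it.  When the
 * L2 is forced to write-through (l2_wt_override), it is always clean,
 * so the clean step is skipped.
 */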
static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
        start &= ~(CACHE_LINE_SIZE - 1);
        end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
        while (start != end) {
                unsigned long range_end = calc_range_end(start, end);
                if (!l2_wt_override)
                        l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
                l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
                start = range_end;
        }

        dsb();
}


/*
 * Routines to disable and re-enable the D-cache and I-cache at run
 * time.  These are necessary because the L2 cache can only be enabled
 * or disabled while the L1 Dcache and Icache are both disabled.
 */
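/*
 * Returns 1 if the D-cache was enabled (and has now been flushed and
 * disabled), so that the caller knows to re-enable it afterwards;
 * returns 0 if it was already disabled.
 */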
static int __init flush_and_disable_dcache(void)
{
        u32 cr;

        cr = get_cr();
        if (cr & CR_C) {
                unsigned long flags;

                raw_local_irq_save(flags);
                flush_cache_all();
                set_cr(cr & ~CR_C);
                raw_local_irq_restore(flags);
                return 1;
        }
        return 0;
}

static void __init enable_dcache(void)
{
        u32 cr;

        cr = get_cr();
        set_cr(cr | CR_C);
}

static void __init __invalidate_icache(void)
{
        __asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}

static int __init invalidate_and_disable_icache(void)
{
        u32 cr;

        cr = get_cr();
        if (cr & CR_I) {
                set_cr(cr & ~CR_I);
                __invalidate_icache();
                return 1;
        }
        return 0;
}

static void __init enable_icache(void)
{
        u32 cr;

        cr = get_cr();
        set_cr(cr | CR_I);
}

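/*
 * Accessors for the CPU Extra Features register, which holds the L2
 * enable and 'Disable L2 Prefetch' control bits used below.
 */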
static inline u32 read_extra_features(void)
{
        u32 u;

        __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

        return u;
}

static inline void write_extra_features(u32 u)
{
        __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static void __init disable_l2_prefetch(void)
{
        u32 u;

        /*
         * Read the CPU Extra Features register and set the
         * 'Disable L2 Prefetch' bit if it is not already set.
         */
        u = read_extra_features();
        if (!(u & 0x01000000)) {
                printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
                write_extra_features(u | 0x01000000);
        }
}

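/*
 * Turn the L2 on if it isn't already enabled.  Since the L2 enable bit
 * can only be toggled while the L1 caches are off, the D-cache and
 * I-cache are temporarily disabled around the update and re-enabled
 * afterwards if they were on.
 */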
static void __init enable_l2(void)
{
        u32 u;

        u = read_extra_features();
        if (!(u & 0x00400000)) {
                int i, d;

                printk(KERN_INFO "Feroceon L2: Enabling L2\n");

                d = flush_and_disable_dcache();
                i = invalidate_and_disable_icache();
                l2_inv_all();
                write_extra_features(u | 0x00400000);
                if (i)
                        enable_icache();
                if (d)
                        enable_dcache();
        }
}

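/*
 * Entry point: record the write-through override setting, disable L2
 * prefetch, install the outer cache callbacks and enable the L2 cache.
 */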
void __init feroceon_l2_init(int __l2_wt_override)
{
        l2_wt_override = __l2_wt_override;

        disable_l2_prefetch();

        outer_cache.inv_range = feroceon_l2_inv_range;
        outer_cache.clean_range = feroceon_l2_clean_range;
        outer_cache.flush_range = feroceon_l2_flush_range;
        outer_cache.inv_all = l2_inv_all;

        enable_l2();

        printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
               l2_wt_override ? ", in WT override mode" : "");
}