Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/mm/cache-v4wt.S | |
3 | * | |
4 | * Copyright (C) 1997-2002 Russell King | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * ARMv4 write through cache operations support. | |
11 | * | |
12 | * We assume that the write buffer is not enabled. | |
13 | */ | |
14 | #include <linux/linkage.h> | |
15 | #include <linux/init.h> | |
1da177e4 LT |
16 | #include <asm/page.h> |
17 | #include "proc-macros.S" | |
18 | ||
19 | /* | |
20 | * The size of one data cache line. | |
21 | */ | |
22 | #define CACHE_DLINESIZE 32 | |
23 | ||
24 | /* | |
25 | * The number of data cache segments. | |
26 | */ | |
27 | #define CACHE_DSEGMENTS 8 | |
28 | ||
29 | /* | |
30 | * The number of lines in a cache segment. | |
31 | */ | |
32 | #define CACHE_DENTRIES 64 | |
33 | ||
34 | /* | |
35 | * This is the size at which it becomes more efficient to | |
36 | * clean the whole cache, rather than using the individual | |
37 | * cache line maintenance instructions. | |
38 | * | |
39 | * *** This needs benchmarking | |
40 | */ | |
41 | #define CACHE_DLIMIT 16384 | |
42 | ||
43 | /* | |
44 | * flush_user_cache_all() | |
45 | * | |
46 | * Invalidate all cache entries in a particular address | |
47 | * space. | |
48 | */ | |
49 | ENTRY(v4wt_flush_user_cache_all) | |
50 | /* FALLTHROUGH */ | |
51 | /* | |
52 | * flush_kern_cache_all() | |
53 | * | |
54 | * Clean and invalidate the entire cache. | |
55 | */ | |
56 | ENTRY(v4wt_flush_kern_cache_all) | |
57 | mov r2, #VM_EXEC | |
58 | mov ip, #0 | |
59 | __flush_whole_cache: | |
60 | tst r2, #VM_EXEC | |
61 | mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
62 | mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache | |
63 | mov pc, lr | |
64 | ||
65 | /* | |
66 | * flush_user_cache_range(start, end, flags) | |
67 | * | |
68 | * Clean and invalidate a range of cache entries in the specified | |
69 | * address space. | |
70 | * | |
71 | * - start - start address (inclusive, page aligned) | |
72 | * - end - end address (exclusive, page aligned) | |
73 | * - flags - vm_area_struct flags describing address space | |
74 | */ | |
75 | ENTRY(v4wt_flush_user_cache_range) | |
76 | sub r3, r1, r0 @ calculate total size | |
77 | cmp r3, #CACHE_DLIMIT | |
78 | bhs __flush_whole_cache | |
79 | ||
80 | 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | |
81 | tst r2, #VM_EXEC | |
82 | mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
83 | add r0, r0, #CACHE_DLINESIZE | |
84 | cmp r0, r1 | |
85 | blo 1b | |
86 | mov pc, lr | |
87 | ||
88 | /* | |
89 | * coherent_kern_range(start, end) | |
90 | * | |
91 | * Ensure coherency between the Icache and the Dcache in the | |
92 | * region described by start. If you have non-snooping | |
93 | * Harvard caches, you need to implement this function. | |
94 | * | |
95 | * - start - virtual start address | |
96 | * - end - virtual end address | |
97 | */ | |
98 | ENTRY(v4wt_coherent_kern_range) | |
99 | /* FALLTHROUGH */ | |
100 | ||
101 | /* | |
102 | * coherent_user_range(start, end) | |
103 | * | |
104 | * Ensure coherency between the Icache and the Dcache in the | |
105 | * region described by start. If you have non-snooping | |
106 | * Harvard caches, you need to implement this function. | |
107 | * | |
108 | * - start - virtual start address | |
109 | * - end - virtual end address | |
110 | */ | |
111 | ENTRY(v4wt_coherent_user_range) | |
112 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
113 | 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
114 | add r0, r0, #CACHE_DLINESIZE | |
115 | cmp r0, r1 | |
116 | blo 1b | |
117 | mov pc, lr | |
118 | ||
119 | /* | |
2c9b9c84 | 120 | * flush_kern_dcache_area(void *addr, size_t size) |
1da177e4 LT |
121 | * |
122 | * Ensure no D cache aliasing occurs, either with itself or | |
123 | * the I cache | |
124 | * | |
2c9b9c84 RK |
125 | * - addr - kernel address |
126 | * - size - region size | |
1da177e4 | 127 | */ |
2c9b9c84 | 128 | ENTRY(v4wt_flush_kern_dcache_area) |
1da177e4 LT |
129 | mov r2, #0 |
130 | mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache | |
2c9b9c84 | 131 | add r1, r0, r1 |
1da177e4 LT |
132 | /* fallthrough */ |
133 | ||
134 | /* | |
135 | * dma_inv_range(start, end) | |
136 | * | |
137 | * Invalidate (discard) the specified virtual address range. | |
138 | * May not write back any entries. If 'start' or 'end' | |
139 | * are not cache line aligned, those lines must be written | |
140 | * back. | |
141 | * | |
142 | * - start - virtual start address | |
143 | * - end - virtual end address | |
144 | */ | |
702b94bf | 145 | v4wt_dma_inv_range: |
1da177e4 LT |
146 | bic r0, r0, #CACHE_DLINESIZE - 1 |
147 | 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | |
148 | add r0, r0, #CACHE_DLINESIZE | |
149 | cmp r0, r1 | |
150 | blo 1b | |
1da177e4 LT |
151 | mov pc, lr |
152 | ||
153 | /* | |
154 | * dma_flush_range(start, end) | |
155 | * | |
156 | * Clean and invalidate the specified virtual address range. | |
157 | * | |
158 | * - start - virtual start address | |
159 | * - end - virtual end address | |
160 | */ | |
161 | .globl v4wt_dma_flush_range | |
162 | .equ v4wt_dma_flush_range, v4wt_dma_inv_range | |
163 | ||
a9c9147e RK |
164 | /* |
165 | * dma_unmap_area(start, size, dir) | |
166 | * - start - kernel virtual start address | |
167 | * - size - size of region | |
168 | * - dir - DMA direction | |
169 | */ | |
170 | ENTRY(v4wt_dma_unmap_area) | |
171 | add r1, r1, r0 | |
172 | teq r2, #DMA_TO_DEVICE | |
173 | bne v4wt_dma_inv_range | |
174 | /* FALLTHROUGH */ | |
175 | ||
176 | /* | |
177 | * dma_map_area(start, size, dir) | |
178 | * - start - kernel virtual start address | |
179 | * - size - size of region | |
180 | * - dir - DMA direction | |
181 | */ | |
182 | ENTRY(v4wt_dma_map_area) | |
183 | mov pc, lr | |
184 | ENDPROC(v4wt_dma_unmap_area) | |
185 | ENDPROC(v4wt_dma_map_area) | |
186 | ||
1da177e4 LT |
187 | __INITDATA |
188 | ||
189 | .type v4wt_cache_fns, #object | |
190 | ENTRY(v4wt_cache_fns) | |
191 | .long v4wt_flush_kern_cache_all | |
192 | .long v4wt_flush_user_cache_all | |
193 | .long v4wt_flush_user_cache_range | |
194 | .long v4wt_coherent_kern_range | |
195 | .long v4wt_coherent_user_range | |
2c9b9c84 | 196 | .long v4wt_flush_kern_dcache_area |
a9c9147e RK |
197 | .long v4wt_dma_map_area |
198 | .long v4wt_dma_unmap_area | |
1da177e4 LT |
199 | .long v4wt_dma_flush_range |
200 | .size v4wt_cache_fns, . - v4wt_cache_fns |