Merge tag 'arm64-perf' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
[linux-2.6-block.git] / arch / arm64 / mm / cache.S
CommitLineData
f1a0c4aa
CM
1/*
2 * Cache maintenance
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
a2d25a53 20#include <linux/errno.h>
f1a0c4aa
CM
21#include <linux/linkage.h>
22#include <linux/init.h>
23#include <asm/assembler.h>
301bcfac 24#include <asm/cpufeature.h>
8d883b23 25#include <asm/alternative.h>
f1a0c4aa 26
f1a0c4aa
CM
27/*
28 * flush_icache_range(start,end)
29 *
30 * Ensure that the I and D caches are coherent within specified region.
31 * This is typically used when code has been written to a memory region,
32 * and will be executed.
33 *
34 * - start - virtual start address of region
35 * - end - virtual end address of region
36 */
37ENTRY(flush_icache_range)
38 /* FALLTHROUGH */
39
40/*
41 * __flush_cache_user_range(start,end)
42 *
43 * Ensure that the I and D caches are coherent within specified region.
44 * This is typically used when code has been written to a memory region,
45 * and will be executed.
46 *
47 * - start - virtual start address of region
48 * - end - virtual end address of region
 *
 * Returns 0 in x0 on success, or -EFAULT if a fault is taken on a
 * user-space address (USER() fixes faults up to label 9 below).
49 */
50ENTRY(__flush_cache_user_range)
51 dcache_line_size x2, x3 // x2 = D-cache line size (x3 is scratch)
52 sub x3, x2, #1 // x3 = line-size mask
53 bic x4, x0, x3 // x4 = start, rounded down to a line boundary
541:
55USER(9f, dc cvau, x4 ) // clean D line to PoU
56 add x4, x4, x2 // advance one cache line
57 cmp x4, x1
58 b.lo 1b // loop until past end
dc60b777 59 dsb ish // complete D-cache cleans before touching I-cache
f1a0c4aa
CM
60
61 icache_line_size x2, x3 // x2 = I-cache line size (x3 is scratch)
62 sub x3, x2, #1
63 bic x4, x0, x3 // x4 = start, I-line aligned
641:
65USER(9f, ic ivau, x4 ) // invalidate I line PoU
66 add x4, x4, x2
67 cmp x4, x1
68 b.lo 1b
dc60b777 69 dsb ish // complete I-cache invalidation
f1a0c4aa 70 isb // re-sync the fetch stream with the new I-cache state
a2d25a53
VM
71 mov x0, #0 // success
72 ret
// Fault fixup: USER() annotated accesses above land here on a fault.
739:
74 mov x0, #-EFAULT // report the user fault to the caller
f1a0c4aa
CM
75 ret
76ENDPROC(flush_icache_range)
77ENDPROC(__flush_cache_user_range)
78
79/*
03324e6e 80 * __flush_dcache_area(kaddr, size)
f1a0c4aa 81 *
0a28714c
AK
82 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
83 * are cleaned and invalidated to the PoC.
f1a0c4aa
CM
84 *
85 * - kaddr - kernel address
86 * - size - size in question
87 */
88ENTRY(__flush_dcache_area)
// Per-line op = civac (clean+invalidate to PoC); barrier domain = sy.
// x0/x1 are the (kaddr, size) arguments; x2/x3 are scratch for the macro.
0a28714c 89 dcache_by_line_op civac, sy, x0, x1, x2, x3
f1a0c4aa 90 ret
20791846 91ENDPIPROC(__flush_dcache_area)
7363590d 92
0a28714c
AK
93/*
94 * __clean_dcache_area_pou(kaddr, size)
95 *
96 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
97 * are cleaned to the PoU.
98 *
99 * - kaddr - kernel address
100 * - size - size in question
101 */
102ENTRY(__clean_dcache_area_pou)
// Per-line op = cvau (clean to PoU only — no invalidate); barrier = ish.
// x0/x1 are the (kaddr, size) arguments; x2/x3 are scratch for the macro.
103 dcache_by_line_op cvau, ish, x0, x1, x2, x3
104 ret
105ENDPROC(__clean_dcache_area_pou)
106
c218bca7
CM
107/*
108 * __inval_cache_range(start, end)
109 * - start - start address of region
110 * - end - end address of region
111 */
112ENTRY(__inval_cache_range)
113 /* FALLTHROUGH */
114
7363590d
CM
115/*
116 * __dma_inv_range(start, end)
117 * - start - virtual start address of region
118 * - end - virtual end address of region
 *
 * Lines that are only partially covered by [start, end) are cleaned AND
 * invalidated (dc civac) rather than just invalidated, so that dirty data
 * outside the range but sharing a cache line is not silently discarded.
 * Fully-covered interior lines are simply invalidated (dc ivac).
119 */
120__dma_inv_range:
121 dcache_line_size x2, x3 // x2 = D-cache line size (x3 is scratch)
122 sub x3, x2, #1 // x3 = line-size mask
ebf81a93 123 tst x1, x3 // end cache line aligned?
7363590d 124 bic x1, x1, x3 // align end downwards
ebf81a93
CM
125 b.eq 1f
126 dc civac, x1 // clean & invalidate D / U line
1271: tst x0, x3 // start cache line aligned?
128 bic x0, x0, x3 // align start downwards
129 b.eq 2f
130 dc civac, x0 // clean & invalidate D / U line
131 b 3f // skip plain invalidate for this first line
1322: dc ivac, x0 // invalidate D / U line
1333: add x0, x0, x2 // advance one cache line
7363590d 134 cmp x0, x1
ebf81a93 135 b.lo 2b // interior lines: invalidate only
7363590d
CM
136 dsb sy // complete all maintenance before returning
137 ret
20791846 138ENDPIPROC(__inval_cache_range)
7363590d
CM
139ENDPROC(__dma_inv_range)
140
141/*
142 * __dma_clean_range(start, end)
143 * - start - virtual start address of region
144 * - end - virtual end address of region
145 */
146__dma_clean_range:
147 dcache_line_size x2, x3 // x2 = D-cache line size (x3 is scratch)
148 sub x3, x2, #1 // x3 = line-size mask
149 bic x0, x0, x3 // align start down to a line boundary
271d35eb
DT
1501:
// Runtime-patched alternative: on CPUs with the CLEAN_CACHE erratum the
// plain clean (dc cvac) is replaced by clean+invalidate (dc civac).
151alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
152 dc cvac, x0 // clean D / U line to PoC
153alternative_else
154 dc civac, x0 // errata workaround: clean+invalidate instead
155alternative_endif
7363590d
CM
156 add x0, x0, x2 // advance one cache line
157 cmp x0, x1
158 b.lo 1b // loop until past end
159 dsb sy // complete all cleans before returning
160 ret
161ENDPROC(__dma_clean_range)
162
163/*
164 * __dma_flush_range(start, end)
165 * - start - virtual start address of region
166 * - end - virtual end address of region
167 */
168ENTRY(__dma_flush_range)
169 dcache_line_size x2, x3 // x2 = D-cache line size (x3 is scratch)
170 sub x3, x2, #1 // x3 = line-size mask
171 bic x0, x0, x3 // align start down to a line boundary
1721: dc civac, x0 // clean & invalidate D / U line
173 add x0, x0, x2 // advance one cache line
174 cmp x0, x1
175 b.lo 1b // loop until past end
176 dsb sy // complete all maintenance before returning
177 ret
20791846 178ENDPIPROC(__dma_flush_range)
7363590d
CM
179
180/*
181 * __dma_map_area(start, size, dir)
182 * - start - kernel virtual start address
183 * - size - size of region
184 * - dir - DMA direction
 *
 * Dispatches on direction: FROM_DEVICE maps need only an invalidate,
 * everything else is cleaned. Both targets are tail-called, so they
 * return directly to our caller.
185 */
186ENTRY(__dma_map_area)
187 add x1, x1, x0 // x1 = end = start + size
188 cmp w2, #DMA_FROM_DEVICE
189 b.eq __dma_inv_range // device writes: invalidate only
190 b __dma_clean_range // CPU writes: clean to PoC
20791846 191ENDPIPROC(__dma_map_area)
7363590d
CM
192
193/*
194 * __dma_unmap_area(start, size, dir)
195 * - start - kernel virtual start address
196 * - size - size of region
197 * - dir - DMA direction
 *
 * TO_DEVICE unmaps need no maintenance (the device only read the data);
 * all other directions invalidate, via a tail-call to __dma_inv_range.
198 */
199ENTRY(__dma_unmap_area)
200 add x1, x1, x0 // x1 = end = start + size
201 cmp w2, #DMA_TO_DEVICE
202 b.ne __dma_inv_range // device may have written: invalidate
203 ret // TO_DEVICE: nothing to do
20791846 204ENDPIPROC(__dma_unmap_area)