Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
bbe88886 CM |
2 | /* |
3 | * linux/arch/arm/mm/cache-v7.S | |
4 | * | |
5 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | |
6 | * Copyright (C) 2005 ARM Ltd. | |
7 | * | |
bbe88886 CM |
8 | * This is the "shell" of the ARMv7 processor support. |
9 | */ | |
10 | #include <linux/linkage.h> | |
11 | #include <linux/init.h> | |
1036b895 | 12 | #include <linux/cfi_types.h> |
bbe88886 | 13 | #include <asm/assembler.h> |
c5102f59 | 14 | #include <asm/errno.h> |
32cfb1b1 | 15 | #include <asm/unwind.h> |
1238c4fd | 16 | #include <asm/hardware/cache-b15-rac.h> |
bbe88886 CM |
17 | |
18 | #include "proc-macros.S" | |
19 | ||
a2faac39 ND |
20 | .arch armv7-a |
21 | ||
5f41f919 MS |
22 | #ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND |
23 | .globl icache_size | |
24 | .data | |
25 | .align 2 | |
26 | icache_size: | |
27 | .long 64 | |
28 | .text | |
29 | #endif | |
c08e20d2 DN |
30 | /* |
31 | * The secondary kernel init calls v7_flush_dcache_all before it enables | |
32 | * the L1; however, the L1 comes out of reset in an undefined state, so | |
33 | * the clean + invalidate performed by v7_flush_dcache_all causes a bunch | |
34 | * of cache lines with uninitialized data and uninitialized tags to get | |
35 | * written out to memory, which does really unpleasant things to the main | |
36 | * processor. We fix this by performing an invalidate, rather than a | |
37 | * clean + invalidate, before jumping into the kernel. | |
38 | * | |
f9e7a99f AB |
39 | * This function needs to be called for both secondary cores startup and |
40 | * primary core resume procedures. | |
c08e20d2 DN |
41 | */ |
42 | ENTRY(v7_invalidate_l1) | |
c0e50736 AB |
43 | mov r0, #0 @ CSSELR = 0: select level 1 data/unified cache
44 | mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR | |
45 | isb | | @ sync CSSELR write before reading CCSIDR
46 | mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR | |
c08e20d2 | 47 | 
f9e7a99f AB |
48 | movw r3, #0x3ff @ mask for the Associativity field
49 | and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3] | |
50 | clz r1, r3 @ WayShift | |
51 | mov r2, #1 | |
52 | mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...] | |
53 | movs r1, r2, lsl r1 @ #1 shifted left by same amount | |
54 | moveq r1, #1 @ r1 needs value > 0 even if only 1 way | |
c08e20d2 | 55 | 
f9e7a99f AB |
56 | and r2, r0, #0x7 @ 'LineSize' in CCSIDR[2:0]
57 | add r2, r2, #4 @ SetShift | |
c08e20d2 | 58 | 
95731b8e AB |
59 | 1: movw ip, #0x7fff @ mask for the NumSets field
60 | and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13] | |
c08e20d2 | 61 | 
95731b8e AB |
62 | 2: mov ip, r0, lsl r2 @ NumSet << SetShift
63 | orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) | |
64 | mcr p15, 0, ip, c7, c6, 2 | | @ DCISW: invalidate (not clean) D line by set/way
f9e7a99f AB |
65 | subs r0, r0, #1 @ Set-- | |
66 | bpl 2b | |
67 | subs r3, r3, r1 @ Way-- | |
68 | bcc 3f | |
69 | mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR | |
70 | b 1b | |
71 | 3: dsb st | | @ complete all invalidates before returning
72 | isb | |
73 | ret lr | |
c08e20d2 DN |
74 | ENDPROC(v7_invalidate_l1)
75 | ||
81d11955 TL |
76 | /* |
77 | * v7_flush_icache_all() | |
78 | * | |
79 | * Flush the whole I-cache. | |
80 | * | |
81 | * Registers: | |
82 | * r0 - set to 0 | |
83 | */ | |
1036b895 | 84 | SYM_TYPED_FUNC_START(v7_flush_icache_all)
81d11955 TL |
85 | mov r0, #0 @ r0 = 0: required operand for the MCRs below
86 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable | |
87 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate | |
6ebbf2ce | 88 | ret lr
1036b895 | 89 | SYM_FUNC_END(v7_flush_icache_all)
81d11955 | 90 | |
031bd879 LP |
91 | /* |
92 | * v7_flush_dcache_louis() | |
93 | * | |
94 | * Flush the D-cache up to the Level of Unification Inner Shareable | |
95 | * | |
1f640552 | 96 | * Corrupted registers: r0-r6, r9-r10 |
031bd879 LP |
97 | */ |
98 | ||
99 | ENTRY(v7_flush_dcache_louis) | |
100 | dmb @ ensure ordering with previous memory accesses | |
101 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr | |
47b8484e RK |
102 | ALT_SMP(mov r3, r0, lsr #20) @ move LoUIS into position
103 | ALT_UP( mov r3, r0, lsr #26) @ move LoUU into position | |
104 | ands r3, r3, #7 << 1 @ extract LoU*2 field from clidr | |
105 | bne start_flush_levels @ LoU != 0, start flushing
69155794 | 106 | #ifdef CONFIG_ARM_ERRATA_643719
d3cd451d RK |
107 | ALT_SMP(mrc p15, 0, r2, c0, c0, 0) @ read main ID register
108 | ALT_UP( ret lr) @ LoUU is zero, so nothing to do | |
aaf4b5d9 RK |
109 | movw r1, #:lower16:(0x410fc090 >> 4) @ ID of ARM Cortex A9 r0p?
110 | movt r1, #:upper16:(0x410fc090 >> 4) @ upper half of the same ID constant
111 | teq r1, r2, lsr #4 @ test for errata affected core and if so... | |
d3cd451d RK |
112 | moveq r3, #1 << 1 @ fix LoUIS value
113 | beq start_flush_levels @ start flushing cache levels | |
69155794 | 114 | #endif
d3cd451d | 115 | ret lr @ LoU == 0 and not an errata core: nothing to flush
031bd879 LP |
116 | ENDPROC(v7_flush_dcache_louis)
117 | ||
bbe88886 CM |
118 | /* |
119 | * v7_flush_dcache_all() | |
120 | * | |
121 | * Flush the whole D-cache. | |
122 | * | |
1f640552 | 123 | * Corrupted registers: r0-r6, r9-r10 |
bbe88886 CM |
124 | * |
125 | * - mm - mm_struct describing address space (stale: this function takes no arguments) | |
126 | */ | |
127 | ENTRY(v7_flush_dcache_all) | |
c30c2f99 | 128 | dmb @ ensure ordering with previous memory accesses
bbe88886 | 129 | mrc p15, 1, r0, c0, c0, 1 @ read clidr
47b8484e RK |
130 | mov r3, r0, lsr #23 @ move LoC into position
131 | ands r3, r3, #7 << 1 @ extract LoC*2 from clidr | |
bbe88886 | 132 | beq finished @ if loc is 0, then no need to clean
cd8b24d9 | 133 | start_flush_levels:
bbe88886 | 134 | mov r10, #0 @ start clean at cache level 0
3287be8c | 135 | flush_levels:
bbe88886 CM |
136 | add r2, r10, r10, lsr #1 @ work out 3x current cache level
137 | mov r1, r0, lsr r2 @ extract cache type bits from clidr | |
138 | and r1, r1, #7 @ mask of the bits for current cache only | |
139 | cmp r1, #2 @ see what cache we have at this level | |
140 | blt skip @ skip if no cache, or just i-cache | |
e7289c6d | 141 | #ifdef CONFIG_PREEMPTION
8e43a905 | 142 | save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
b46c0f74 | 143 | #endif
bbe88886 CM |
144 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
145 | isb @ isb to sych the new cssr&csidr | |
146 | mrc p15, 1, r1, c0, c0, 0 @ read the new csidr | |
e7289c6d | 147 | #ifdef CONFIG_PREEMPTION
b46c0f74 SB |
148 | restore_irqs_notrace r9
149 | #endif | |
bbe88886 CM |
150 | and r2, r1, #7 @ extract the length of the cache lines
151 | add r2, r2, #4 @ add 4 (line length offset) | |
5aca3708 | 152 | movw r4, #0x3ff
bbe88886 CM |
153 | ands r4, r4, r1, lsr #3 @ find maximum number on the way size
154 | clz r5, r4 @ find bit position of way size increment | |
1f640552 AB |
155 | movw r6, #0x7fff
156 | and r1, r6, r1, lsr #13 @ extract max number of the index size | |
157 | mov r6, #1 | | @ way-loop decrement unit, shifted into place below
158 | movne r4, r4, lsl r5 @ # of ways shifted into bits [31:...] | |
159 | movne r6, r6, lsl r5 @ 1 shifted left by same amount | |
3287be8c | 160 | loop1:
1f640552 | 161 | mov r9, r1 @ create working copy of max index
3287be8c | 162 | loop2:
1f640552 AB |
163 | mov r5, r9, lsl r2 @ factor set number into r5
164 | orr r5, r5, r4 @ factor way number into r5 | |
165 | orr r5, r5, r10 @ factor cache level into r5 | |
166 | mcr p15, 0, r5, c7, c14, 2 @ clean & invalidate by set/way | |
70f665fe | 167 | subs r9, r9, #1 @ decrement the index
bbe88886 | 168 | bge loop2
1f640552 AB |
169 | subs r4, r4, r6 @ decrement the way
170 | bcs loop1 | |
bbe88886 CM |
171 | skip:
172 | add r10, r10, #2 @ increment cache number | |
173 | cmp r3, r10 @ done all levels below LoC?
779eb41c BG |
174 | #ifdef CONFIG_ARM_ERRATA_814220
175 | dsb
176 | #endif
3287be8c | 177 | bgt flush_levels
bbe88886 | 178 | finished:
08a7e621 | 179 | mov r10, #0 @ switch back to cache level 0
bbe88886 | 180 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
9581960a | 181 | dsb st
bbe88886 | 182 | isb
6ebbf2ce | 183 | ret lr
93ed3970 | 184 | ENDPROC(v7_flush_dcache_all)
bbe88886 CM |
185 | |
186 | /* | |
187 | * v7_flush_kern_cache_all() | |
188 | * | |
189 | * Flush the entire cache system. | |
190 | * The data cache flush is now achieved using atomic clean / invalidates | |
191 | * working outwards from L1 cache. This is done using Set/Way based cache | |
25985edc | 192 | * maintenance instructions. |
bbe88886 CM |
193 | * The instruction cache can still be invalidated back to the point of |
194 | * unification in a single instruction. | |
195 | * | |
196 | */ | |
1036b895 | 197 | SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
1f640552 | 198 | stmfd sp!, {r4-r6, r9-r10, lr} @ save regs corrupted by v7_flush_dcache_all
bbe88886 CM |
199 | bl v7_flush_dcache_all
200 | mov r0, #0 @ r0 = 0: operand for the invalidate ops below
f00ec48f RK |
201 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
202 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
1f640552 | 203 | ldmfd sp!, {r4-r6, r9-r10, lr}
6ebbf2ce | 204 | ret lr
1036b895 | 205 | SYM_FUNC_END(v7_flush_kern_cache_all)
bbe88886 | 206 | |
031bd879 LP |
207 | /* |
208 | * v7_flush_kern_cache_louis(void) | |
209 | * | |
210 | * Flush the data cache up to Level of Unification Inner Shareable. | |
211 | * Invalidate the I-cache to the point of unification. | |
212 | */ | |
1036b895 | 213 | SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
1f640552 | 214 | stmfd sp!, {r4-r6, r9-r10, lr} @ save regs corrupted by v7_flush_dcache_louis
031bd879 LP |
215 | bl v7_flush_dcache_louis
216 | mov r0, #0 | | @ r0 = 0: operand for the invalidate ops below
217 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable | |
218 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate | |
1f640552 | 219 | ldmfd sp!, {r4-r6, r9-r10, lr}
6ebbf2ce | 220 | ret lr
1036b895 | 221 | SYM_FUNC_END(v7_flush_kern_cache_louis)
031bd879 | 222 | |
bbe88886 CM |
223 | /* |
224 | * v7_flush_user_cache_all() | |
225 | * | |
226 | * Flush all cache entries in a particular address space | |
227 | * | |
228 | * - mm - mm_struct describing address space | |
229 | */ | |
1036b895 LW |
230 | SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
231 | ret lr | | @ deliberate no-op: nothing to do per-address-space here
232 | SYM_FUNC_END(v7_flush_user_cache_all) | |
bbe88886 CM |
233 | |
234 | /* | |
235 | * v7_flush_user_cache_range(start, end, flags) | |
236 | * | |
237 | * Flush a range of cache entries in the specified address space. | |
238 | * | |
239 | * - start - start address (may not be aligned) | |
240 | * - end - end address (exclusive, may not be aligned) | |
241 | * - flags - vm_area_struct flags describing address space | |
242 | * | |
243 | * It is assumed that: | |
244 | * - we have a VIPT cache. | |
245 | */ | |
1036b895 | 246 | SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
6ebbf2ce | 247 | ret lr @ deliberate no-op (see VIPT assumption in header comment)
1036b895 | 248 | SYM_FUNC_END(v7_flush_user_cache_range)
bbe88886 CM |
249 | |
250 | /* | |
251 | * v7_coherent_kern_range(start,end) | |
252 | * | |
253 | * Ensure that the I and D caches are coherent within specified | |
254 | * region. This is typically used when code has been written to | |
255 | * a memory region, and will be executed. | |
256 | * | |
257 | * - start - virtual start address of region | |
258 | * - end - virtual end address of region | |
259 | * | |
260 | * It is assumed that: | |
261 | * - the Icache does not read data from the write buffer | |
262 | */ | |
1036b895 | 263 | SYM_TYPED_FUNC_START(v7_coherent_kern_range)
7b749aad | 264 | #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
1036b895 | 265 | b v7_coherent_user_range @ shared implementation below
7b749aad | 266 | #endif
1036b895 | 267 | SYM_FUNC_END(v7_coherent_kern_range)
bbe88886 CM |
268 | |
269 | /* | |
270 | * v7_coherent_user_range(start,end) | |
271 | * | |
272 | * Ensure that the I and D caches are coherent within specified | |
273 | * region. This is typically used when code has been written to | |
274 | * a memory region, and will be executed. | |
275 | * | |
276 | * - start - virtual start address of region | |
277 | * - end - virtual end address of region | |
278 | * | |
279 | * It is assumed that: | |
280 | * - the Icache does not read data from the write buffer | |
281 | */ | |
1036b895 | 282 | SYM_TYPED_FUNC_START(v7_coherent_user_range)
32cfb1b1 | 283 | UNWIND(.fnstart )
bbe88886 CM |
284 | dcache_line_size r2, r3
285 | sub r3, r2, #1 | | @ r3 = D-cache line mask
286 | bic r12, r0, r3 @ align start down to a D-cache line
f630c1bd WD |
287 | #ifdef CONFIG_ARM_ERRATA_764369
288 | ALT_SMP(W(dsb)) | |
289 | ALT_UP(W(nop)) | |
290 | #endif | |
32cfb1b1 | 291 | 1:
da30e0ac CM |
292 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification (may fault; fixup at 9001)
293 | add r12, r12, r2 | |
294 | cmp r12, r1 | |
295 | blo 1b | |
6abdd491 | 296 | dsb ishst @ D-side cleans complete before I-side invalidation
5f41f919 MS |
297 | #ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
298 | ldr r3, =icache_size | |
299 | ldr r2, [r3, #0] | |
300 | #else | |
da30e0ac | 301 | icache_line_size r2, r3
5f41f919 | 302 | #endif
da30e0ac CM |
303 | sub r3, r2, #1 @ r3 = I-cache line mask
304 | bic r12, r0, r3 | | @ align start down to an I-cache line
32cfb1b1 | 305 | 2:
da30e0ac CM |
306 | USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line (may fault; fixup at 9001)
307 | add r12, r12, r2 | |
308 | cmp r12, r1 | |
309 | blo 2b | |
bbe88886 | 310 | mov r0, #0 @ r0 = 0: success return and BTB-op operand
f00ec48f RK |
311 | ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
312 | ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB | |
6abdd491 | 313 | dsb ishst
bbe88886 | 314 | isb
6ebbf2ce | 315 | ret lr
32cfb1b1 CM |
316 | |
317 | /* | |
318 | * Fault handling for the cache operation above. If the virtual address in r0 | |
319 | * isn't mapped, fail with -EFAULT. |
32cfb1b1 CM |
320 | */ | |
321 | 9001: | |
7253b85c SH |
322 | #ifdef CONFIG_ARM_ERRATA_775420
323 | dsb | |
324 | #endif | |
c5102f59 | 325 | mov r0, #-EFAULT
6ebbf2ce | 326 | ret lr
32cfb1b1 | 327 | UNWIND(.fnend )
1036b895 | 328 | SYM_FUNC_END(v7_coherent_user_range)
bbe88886 CM |
329 | |
330 | /* | |
2c9b9c84 | 331 | * v7_flush_kern_dcache_area(void *addr, size_t size) |
bbe88886 CM |
332 | * |
333 | * Ensure that the data held in the page kaddr is written back | |
334 | * to the page in question. | |
335 | * | |
2c9b9c84 RK |
336 | * - addr - kernel address |
337 | * - size - region size | |
bbe88886 | 338 | */ |
1036b895 | 339 | SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area)
bbe88886 | 340 | dcache_line_size r2, r3
2c9b9c84 | 341 | add r1, r0, r1 @ r1 = end = addr + size
a248b13b WD |
342 | sub r3, r2, #1 @ r3 = line mask
343 | bic r0, r0, r3 | | @ align start down to a cache line
f630c1bd WD |
344 | #ifdef CONFIG_ARM_ERRATA_764369
345 | ALT_SMP(W(dsb)) | |
346 | ALT_UP(W(nop)) | |
347 | #endif | |
bbe88886 CM |
348 | 1:
349 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line | |
350 | add r0, r0, r2 | |
351 | cmp r0, r1 | |
352 | blo 1b | |
9581960a | 353 | dsb st
6ebbf2ce | 354 | ret lr
1036b895 | 355 | SYM_FUNC_END(v7_flush_kern_dcache_area)
bbe88886 CM |
356 | |
357 | /* | |
358 | * v7_dma_inv_range(start,end) | |
359 | * | |
360 | * Invalidate the data cache within the specified region; we will | |
361 | * be performing a DMA operation in this region and we want to | |
362 | * purge old data in the cache. | |
363 | * | |
364 | * - start - virtual start address of region | |
365 | * - end - virtual end address of region | |
366 | */ | |
702b94bf | 367 | v7_dma_inv_range:
bbe88886 CM |
368 | dcache_line_size r2, r3
369 | sub r3, r2, #1 | | @ r3 = line mask
370 | tst r0, r3 | | @ start mis-aligned?
371 | bic r0, r0, r3 | | @ align start down
f630c1bd WD |
372 | #ifdef CONFIG_ARM_ERRATA_764369
373 | ALT_SMP(W(dsb)) | |
374 | ALT_UP(W(nop)) | |
375 | #endif | |
bbe88886 | 376 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line (partial line at start)
a1208f6a | 377 | addne r0, r0, r2
bbe88886 CM |
378 | |
379 | tst r1, r3 | | @ end mis-aligned?
380 | bic r1, r1, r3 | | @ align end down
381 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line (partial line at end) | |
bbe88886 | 382 | cmp r0, r1 @ anything left in between?
a1208f6a CC |
383 | 1:
384 | mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line | |
385 | addlo r0, r0, r2 | |
386 | cmplo r0, r1 | |
bbe88886 | 387 | blo 1b
9581960a | 388 | dsb st
6ebbf2ce | 389 | ret lr
93ed3970 | 390 | ENDPROC(v7_dma_inv_range)
bbe88886 CM |
391 | |
392 | /* | |
393 | * v7_dma_clean_range(start,end) | |
394 | * - start - virtual start address of region | |
395 | * - end - virtual end address of region | |
396 | */ | |
702b94bf | 397 | v7_dma_clean_range:
bbe88886 CM |
398 | dcache_line_size r2, r3
399 | sub r3, r2, #1 | | @ r3 = line mask
400 | bic r0, r0, r3 | | @ align start down to a cache line
f630c1bd WD |
401 | #ifdef CONFIG_ARM_ERRATA_764369
402 | ALT_SMP(W(dsb)) | |
403 | ALT_UP(W(nop)) | |
404 | #endif | |
bbe88886 CM |
405 | 1:
406 | mcr p15, 0, r0, c7, c10, 1 @ clean D / U line | |
407 | add r0, r0, r2 | |
408 | cmp r0, r1 | |
409 | blo 1b | |
9581960a | 410 | dsb st
6ebbf2ce | 411 | ret lr
93ed3970 | 412 | ENDPROC(v7_dma_clean_range)
bbe88886 CM |
413 | |
414 | /* | |
415 | * v7_dma_flush_range(start,end) | |
416 | * - start - virtual start address of region | |
417 | * - end - virtual end address of region | |
418 | */ | |
1036b895 | 419 | SYM_TYPED_FUNC_START(v7_dma_flush_range)
bbe88886 CM |
420 | dcache_line_size r2, r3
421 | sub r3, r2, #1 | | @ r3 = line mask
422 | bic r0, r0, r3 | | @ align start down to a cache line
f630c1bd WD |
423 | #ifdef CONFIG_ARM_ERRATA_764369
424 | ALT_SMP(W(dsb)) | |
425 | ALT_UP(W(nop)) | |
426 | #endif | |
bbe88886 CM |
427 | 1:
428 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | |
429 | add r0, r0, r2 | |
430 | cmp r0, r1 | |
431 | blo 1b | |
9581960a | 432 | dsb st
6ebbf2ce | 433 | ret lr
1036b895 | 434 | SYM_FUNC_END(v7_dma_flush_range)
bbe88886 | 435 | |
a9c9147e RK |
436 | /* |
437 | * dma_map_area(start, size, dir) | |
438 | * - start - kernel virtual start address | |
439 | * - size - size of region | |
440 | * - dir - DMA direction | |
441 | */ | |
1036b895 | 442 | SYM_TYPED_FUNC_START(v7_dma_map_area)
a9c9147e | 443 | add r1, r1, r0 @ r1 = end = start + size
2ffe2da3 RK |
444 | teq r2, #DMA_FROM_DEVICE
445 | beq v7_dma_inv_range | | @ FROM_DEVICE: invalidate only
446 | b v7_dma_clean_range | | @ otherwise: clean
1036b895 | 447 | SYM_FUNC_END(v7_dma_map_area)
a9c9147e RK |
448 | |
449 | /* | |
450 | * dma_unmap_area(start, size, dir) | |
451 | * - start - kernel virtual start address | |
452 | * - size - size of region | |
453 | * - dir - DMA direction | |
454 | */ | |
1036b895 | 455 | SYM_TYPED_FUNC_START(v7_dma_unmap_area)
2ffe2da3 RK |
456 | add r1, r1, r0 @ r1 = end = start + size
457 | teq r2, #DMA_TO_DEVICE | | @ dir != TO_DEVICE: device may have written
458 | bne v7_dma_inv_range | | @ ... so invalidate the range
6ebbf2ce | 459 | ret lr @ TO_DEVICE: nothing to do on unmap
1036b895 | 460 | SYM_FUNC_END(v7_dma_unmap_area)