Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
9a88cbb5 | 7 | * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> |
1da177e4 LT |
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. |
9 | */ | |
9a88cbb5 | 10 | |
1da177e4 | 11 | #include <linux/types.h> |
9a88cbb5 | 12 | #include <linux/dma-mapping.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | |
4fcc47a0 | 15 | #include <linux/scatterlist.h> |
6e86b0bf | 16 | #include <linux/string.h> |
5a0e3ad6 | 17 | #include <linux/gfp.h> |
1da177e4 LT |
18 | |
19 | #include <asm/cache.h> | |
20 | #include <asm/io.h> | |
21 | ||
9a88cbb5 RB |
22 | #include <dma-coherence.h> |
23 | ||
3807ef3f KC |
24 | static inline unsigned long dma_addr_to_virt(struct device *dev, |
25 | dma_addr_t dma_addr) | |
c9d06962 | 26 | { |
3807ef3f | 27 | unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr); |
c9d06962 FBH |
28 | |
29 | return (unsigned long)phys_to_virt(addr); | |
30 | } | |
31 | ||
/*
 * A warning on terminology: Linux calls an uncached area "coherent",
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 */
9a88cbb5 RB |
38 | static inline int cpu_is_noncoherent_r10000(struct device *dev) |
39 | { | |
40 | return !plat_device_is_coherent(dev) && | |
10cc3529 RB |
41 | (current_cpu_type() == CPU_R10000 || |
42 | current_cpu_type() == CPU_R12000); | |
9a88cbb5 RB |
43 | } |
44 | ||
/*
 * Derive the GFP zone modifier that matches the device's coherent DMA
 * mask, for whichever combination of DMA zones this kernel was built
 * with.  Caller-supplied zone specifiers are discarded first; the OOM
 * killer is suppressed for all DMA allocations.
 *
 * NOTE: the if/else chain below deliberately dangles across the
 * #ifdef/#endif boundaries so that exactly one assignment to dma_flag
 * is selected at compile time — do not reformat it.
 */
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	/* A NULL device means an ISA allocation: restrict to ZONE_DMA. */
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	/* Both zones available: pick the narrowest zone the mask needs. */
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
	/* Mask covers all memory (or no DMA zones configured). */
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
81 | ||
1da177e4 | 82 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
185a8ff5 | 83 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
84 | { |
85 | void *ret; | |
9a88cbb5 | 86 | |
cce335ae | 87 | gfp = massage_gfp_flags(dev, gfp); |
1da177e4 | 88 | |
1da177e4 LT |
89 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
90 | ||
91 | if (ret != NULL) { | |
92 | memset(ret, 0, size); | |
9a88cbb5 | 93 | *dma_handle = plat_map_dma_mem(dev, ret, size); |
1da177e4 LT |
94 | } |
95 | ||
96 | return ret; | |
97 | } | |
98 | ||
99 | EXPORT_SYMBOL(dma_alloc_noncoherent); | |
100 | ||
101 | void *dma_alloc_coherent(struct device *dev, size_t size, | |
185a8ff5 | 102 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
103 | { |
104 | void *ret; | |
105 | ||
f8ac0425 YY |
106 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) |
107 | return ret; | |
108 | ||
cce335ae | 109 | gfp = massage_gfp_flags(dev, gfp); |
9a88cbb5 | 110 | |
9a88cbb5 RB |
111 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
112 | ||
1da177e4 | 113 | if (ret) { |
9a88cbb5 RB |
114 | memset(ret, 0, size); |
115 | *dma_handle = plat_map_dma_mem(dev, ret, size); | |
116 | ||
117 | if (!plat_device_is_coherent(dev)) { | |
118 | dma_cache_wback_inv((unsigned long) ret, size); | |
119 | ret = UNCAC_ADDR(ret); | |
120 | } | |
1da177e4 LT |
121 | } |
122 | ||
123 | return ret; | |
124 | } | |
125 | ||
126 | EXPORT_SYMBOL(dma_alloc_coherent); | |
127 | ||
128 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
129 | dma_addr_t dma_handle) | |
130 | { | |
d3f634b9 | 131 | plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); |
1da177e4 LT |
132 | free_pages((unsigned long) vaddr, get_order(size)); |
133 | } | |
134 | ||
135 | EXPORT_SYMBOL(dma_free_noncoherent); | |
136 | ||
137 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
138 | dma_addr_t dma_handle) | |
139 | { | |
140 | unsigned long addr = (unsigned long) vaddr; | |
f8ac0425 YY |
141 | int order = get_order(size); |
142 | ||
143 | if (dma_release_from_coherent(dev, order, vaddr)) | |
144 | return; | |
1da177e4 | 145 | |
d3f634b9 | 146 | plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); |
11531ac2 | 147 | |
9a88cbb5 RB |
148 | if (!plat_device_is_coherent(dev)) |
149 | addr = CAC_ADDR(addr); | |
150 | ||
1da177e4 LT |
151 | free_pages(addr, get_order(size)); |
152 | } | |
153 | ||
154 | EXPORT_SYMBOL(dma_free_coherent); | |
155 | ||
156 | static inline void __dma_sync(unsigned long addr, size_t size, | |
157 | enum dma_data_direction direction) | |
158 | { | |
159 | switch (direction) { | |
160 | case DMA_TO_DEVICE: | |
161 | dma_cache_wback(addr, size); | |
162 | break; | |
163 | ||
164 | case DMA_FROM_DEVICE: | |
165 | dma_cache_inv(addr, size); | |
166 | break; | |
167 | ||
168 | case DMA_BIDIRECTIONAL: | |
169 | dma_cache_wback_inv(addr, size); | |
170 | break; | |
171 | ||
172 | default: | |
173 | BUG(); | |
174 | } | |
175 | } | |
176 | ||
177 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
178 | enum dma_data_direction direction) | |
179 | { | |
180 | unsigned long addr = (unsigned long) ptr; | |
181 | ||
9a88cbb5 RB |
182 | if (!plat_device_is_coherent(dev)) |
183 | __dma_sync(addr, size, direction); | |
1da177e4 | 184 | |
9a88cbb5 | 185 | return plat_map_dma_mem(dev, ptr, size); |
1da177e4 LT |
186 | } |
187 | ||
188 | EXPORT_SYMBOL(dma_map_single); | |
189 | ||
190 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
191 | enum dma_data_direction direction) | |
192 | { | |
9a88cbb5 | 193 | if (cpu_is_noncoherent_r10000(dev)) |
3807ef3f | 194 | __dma_sync(dma_addr_to_virt(dev, dma_addr), size, |
9a88cbb5 | 195 | direction); |
1da177e4 | 196 | |
d3f634b9 | 197 | plat_unmap_dma_mem(dev, dma_addr, size, direction); |
1da177e4 LT |
198 | } |
199 | ||
200 | EXPORT_SYMBOL(dma_unmap_single); | |
201 | ||
202 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
203 | enum dma_data_direction direction) | |
204 | { | |
205 | int i; | |
206 | ||
207 | BUG_ON(direction == DMA_NONE); | |
208 | ||
209 | for (i = 0; i < nents; i++, sg++) { | |
210 | unsigned long addr; | |
42a3b4f2 | 211 | |
58b053e4 | 212 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 213 | if (!plat_device_is_coherent(dev) && addr) |
58b053e4 | 214 | __dma_sync(addr, sg->length, direction); |
fbd5604d | 215 | sg->dma_address = plat_map_dma_mem(dev, |
58b053e4 | 216 | (void *)addr, sg->length); |
1da177e4 LT |
217 | } |
218 | ||
219 | return nents; | |
220 | } | |
221 | ||
222 | EXPORT_SYMBOL(dma_map_sg); | |
223 | ||
224 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
225 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
226 | { | |
1da177e4 LT |
227 | BUG_ON(direction == DMA_NONE); |
228 | ||
9a88cbb5 RB |
229 | if (!plat_device_is_coherent(dev)) { |
230 | unsigned long addr; | |
231 | ||
232 | addr = (unsigned long) page_address(page) + offset; | |
4f29c057 | 233 | __dma_sync(addr, size, direction); |
9a88cbb5 | 234 | } |
1da177e4 | 235 | |
9a88cbb5 | 236 | return plat_map_dma_mem_page(dev, page) + offset; |
1da177e4 LT |
237 | } |
238 | ||
239 | EXPORT_SYMBOL(dma_map_page); | |
240 | ||
1da177e4 LT |
241 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, |
242 | enum dma_data_direction direction) | |
243 | { | |
244 | unsigned long addr; | |
245 | int i; | |
246 | ||
247 | BUG_ON(direction == DMA_NONE); | |
248 | ||
1da177e4 | 249 | for (i = 0; i < nhwentries; i++, sg++) { |
9a88cbb5 RB |
250 | if (!plat_device_is_coherent(dev) && |
251 | direction != DMA_TO_DEVICE) { | |
58b053e4 | 252 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 253 | if (addr) |
58b053e4 | 254 | __dma_sync(addr, sg->length, direction); |
9a88cbb5 | 255 | } |
d3f634b9 | 256 | plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); |
1da177e4 LT |
257 | } |
258 | } | |
259 | ||
260 | EXPORT_SYMBOL(dma_unmap_sg); | |
261 | ||
262 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
263 | size_t size, enum dma_data_direction direction) | |
264 | { | |
1da177e4 | 265 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 266 | |
9a88cbb5 RB |
267 | if (cpu_is_noncoherent_r10000(dev)) { |
268 | unsigned long addr; | |
269 | ||
3807ef3f | 270 | addr = dma_addr_to_virt(dev, dma_handle); |
9a88cbb5 RB |
271 | __dma_sync(addr, size, direction); |
272 | } | |
1da177e4 LT |
273 | } |
274 | ||
275 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
276 | ||
277 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
278 | size_t size, enum dma_data_direction direction) | |
279 | { | |
1da177e4 LT |
280 | BUG_ON(direction == DMA_NONE); |
281 | ||
843aef49 | 282 | plat_extra_sync_for_device(dev); |
9b43fb6b | 283 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
284 | unsigned long addr; |
285 | ||
3807ef3f | 286 | addr = dma_addr_to_virt(dev, dma_handle); |
9a88cbb5 RB |
287 | __dma_sync(addr, size, direction); |
288 | } | |
1da177e4 LT |
289 | } |
290 | ||
291 | EXPORT_SYMBOL(dma_sync_single_for_device); | |
292 | ||
293 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
294 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
295 | { | |
1da177e4 LT |
296 | BUG_ON(direction == DMA_NONE); |
297 | ||
9a88cbb5 RB |
298 | if (cpu_is_noncoherent_r10000(dev)) { |
299 | unsigned long addr; | |
300 | ||
3807ef3f | 301 | addr = dma_addr_to_virt(dev, dma_handle); |
9a88cbb5 RB |
302 | __dma_sync(addr + offset, size, direction); |
303 | } | |
1da177e4 LT |
304 | } |
305 | ||
306 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
307 | ||
308 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
309 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
310 | { | |
1da177e4 LT |
311 | BUG_ON(direction == DMA_NONE); |
312 | ||
843aef49 | 313 | plat_extra_sync_for_device(dev); |
9b43fb6b | 314 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
315 | unsigned long addr; |
316 | ||
3807ef3f | 317 | addr = dma_addr_to_virt(dev, dma_handle); |
9a88cbb5 RB |
318 | __dma_sync(addr + offset, size, direction); |
319 | } | |
1da177e4 LT |
320 | } |
321 | ||
322 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
323 | ||
324 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
325 | enum dma_data_direction direction) | |
326 | { | |
327 | int i; | |
42a3b4f2 | 328 | |
1da177e4 | 329 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 330 | |
1da177e4 | 331 | /* Make sure that gcc doesn't leave the empty loop body. */ |
9a88cbb5 | 332 | for (i = 0; i < nelems; i++, sg++) { |
5b648a98 | 333 | if (cpu_is_noncoherent_r10000(dev)) |
58b053e4 | 334 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 335 | sg->length, direction); |
9a88cbb5 | 336 | } |
1da177e4 LT |
337 | } |
338 | ||
339 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
340 | ||
341 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
342 | enum dma_data_direction direction) | |
343 | { | |
344 | int i; | |
345 | ||
346 | BUG_ON(direction == DMA_NONE); | |
347 | ||
348 | /* Make sure that gcc doesn't leave the empty loop body. */ | |
9a88cbb5 RB |
349 | for (i = 0; i < nelems; i++, sg++) { |
350 | if (!plat_device_is_coherent(dev)) | |
58b053e4 | 351 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 352 | sg->length, direction); |
9a88cbb5 | 353 | } |
1da177e4 LT |
354 | } |
355 | ||
356 | EXPORT_SYMBOL(dma_sync_sg_for_device); | |
357 | ||
/*
 * Return non-zero if @dma_addr is the platform's DMA error cookie,
 * i.e. an earlier dma_map_*() call for @dev failed.  Delegated
 * entirely to platform code.
 */
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);
364 | ||
/*
 * Report whether @dev can DMA to all memory described by @mask.
 * The answer is platform-specific, so it is delegated to
 * plat_dma_supported().
 */
int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
371 | ||
d3fa72e4 | 372 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
9a88cbb5 | 373 | enum dma_data_direction direction) |
1da177e4 | 374 | { |
9a88cbb5 | 375 | BUG_ON(direction == DMA_NONE); |
1da177e4 | 376 | |
843aef49 | 377 | plat_extra_sync_for_device(dev); |
9a88cbb5 | 378 | if (!plat_device_is_coherent(dev)) |
c7c6b390 | 379 | __dma_sync((unsigned long)vaddr, size, direction); |
1da177e4 LT |
380 | } |
381 | ||
382 | EXPORT_SYMBOL(dma_cache_sync); |