/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

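/*
 * Turn a DMA address handed out by the platform code back into the kernel
 * virtual address of the buffer, so the cache maintenance helpers below
 * can operate on it.
 */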
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

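/*
 * R10000 and R12000 class CPUs execute speculatively, so cache lines for a
 * DMA buffer may be refilled while the transfer is still in flight.  On
 * these CPUs the CPU-side caches must be cleaned again after the DMA has
 * completed, not only before it starts.
 */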
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}

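/*
 * Pick the GFP zone that matches the device's coherent_dma_mask: strip
 * whatever zone specifiers the caller passed in, fall back to ZONE_DMA for
 * unknown devices or masks below 24 bits, and use ZONE_DMA32 for masks
 * below 32 bits.  __GFP_NORETRY keeps these allocations from triggering
 * the OOM killer.
 */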
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

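/*
 * On platforms without hardware-maintained coherency the freshly allocated
 * buffer is flushed from the caches and the returned pointer is switched to
 * its uncached alias via UNCAC_ADDR(), so CPU accesses bypass the cache
 * entirely.  dma_free_coherent() maps the address back with CAC_ADDR()
 * before freeing the pages.
 */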
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dma_handle);
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        plat_unmap_dma_mem(dma_handle);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

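/*
 * Cache maintenance for streaming mappings: write back dirty lines before
 * the device reads the buffer (DMA_TO_DEVICE), invalidate stale lines
 * before the CPU reads what the device wrote (DMA_FROM_DEVICE), or do both
 * for DMA_BIDIRECTIONAL.
 */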
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

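/*
 * Streaming mappings: dma_map_single() does the cache maintenance up front
 * on non-coherent platforms, while dma_unmap_single() only has to redo it
 * on speculative R10000-class CPUs (see cpu_is_noncoherent_r10000 above).
 */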
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

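/*
 * Map a scatterlist: sync each segment for the device when needed and
 * record the bus address the device should use in sg->dma_address.
 */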
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

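/*
 * Ownership transfers for an existing streaming mapping: sync_*_for_cpu is
 * called before the CPU reads the buffer and only matters on speculative
 * R10000-class CPUs, while sync_*_for_device must flush on every
 * non-coherent platform before the device touches the buffer again.
 */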
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body. */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body. */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA.
         */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);