[linux-2.6-block.git] arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

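/*
 * Translate a bus/DMA address back into the kernel virtual address of
 * the buffer, via the platform's plat_dma_addr_to_phys() hook, so that
 * cache operations can be performed on it.
 */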
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

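/*
 * The R10000 and R12000 execute loads speculatively and may refill the
 * caches while a DMA transfer is still in flight, so on these CPUs the
 * caches must be maintained even when taking a buffer back from a
 * device.
 */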
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	        current_cpu_type() == CPU_R12000);
}

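/*
 * Pick an allocation zone from the device's coherent DMA mask: devices
 * that cannot reach all of memory are restricted to ZONE_DMA (24-bit,
 * ISA-style masks) or ZONE_DMA32 pages.
 */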
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

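/*
 * Noncoherent allocations return a cached mapping; the caller is
 * expected to maintain coherency explicitly, e.g. with dma_cache_sync().
 */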
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

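/*
 * Coherent allocations on a non-coherent device are flushed from the
 * caches and handed out through the uncached alias of the buffer
 * (UNCAC_ADDR), so CPU accesses bypass the caches entirely.
 */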
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

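/*
 * Undo the UNCAC_ADDR translation done at allocation time: the page
 * allocator only knows about the cached address.
 */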
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

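/*
 * Perform the cache maintenance matching the transfer direction:
 * write back dirty lines before the device reads memory, invalidate
 * before the CPU reads what the device wrote, or both.
 */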
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

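/*
 * Streaming mappings: on non-coherent devices the buffer is synced
 * before its bus address is handed to the device.
 */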
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

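/*
 * On unmap, only the speculating R10000/R12000 can have pulled stale
 * data back into the caches; everywhere else the sync done at map time
 * is sufficient.
 */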
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

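/*
 * Scatterlists are mapped entry by entry; each segment is synced (when
 * necessary) and given its own bus address.
 */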
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
		                                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

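/*
 * For anything but DMA_TO_DEVICE, data written by the device must be
 * made visible to the CPU before the pages are reused.
 */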
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

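/*
 * dma_sync_*_for_cpu() returns buffer ownership to the CPU; only the
 * speculative R10000/R12000 can hold stale lines at this point.
 */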
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

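/*
 * dma_sync_*_for_device() hands the buffer back to the device, so every
 * non-coherent platform has to write back/invalidate here.
 */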
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body. */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body. */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

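/*
 * The remaining DMA API hooks simply defer to the platform: whether a
 * bus address indicates a mapping error, which DMA masks are supported,
 * and whether the device is cache-coherent at all.
 */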
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

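/*
 * Explicit cache maintenance for buffers from dma_alloc_noncoherent();
 * effectively a no-op for coherent devices.
 */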
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);