Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
70342287 | 7 | * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> |
1da177e4 LT |
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. |
9 | */ | |
9a88cbb5 | 10 | |
1da177e4 | 11 | #include <linux/types.h> |
9a88cbb5 | 12 | #include <linux/dma-mapping.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | |
4fcc47a0 | 15 | #include <linux/scatterlist.h> |
6e86b0bf | 16 | #include <linux/string.h> |
5a0e3ad6 | 17 | #include <linux/gfp.h> |
e36863a5 | 18 | #include <linux/highmem.h> |
1da177e4 LT |
19 | |
20 | #include <asm/cache.h> | |
21 | #include <asm/io.h> | |
22 | ||
9a88cbb5 RB |
23 | #include <dma-coherence.h> |
24 | ||
e36863a5 | 25 | static inline struct page *dma_addr_to_page(struct device *dev, |
3807ef3f | 26 | dma_addr_t dma_addr) |
c9d06962 | 27 | { |
e36863a5 DD |
28 | return pfn_to_page( |
29 | plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT); | |
c9d06962 FBH |
30 | } |
31 | ||
1da177e4 LT |
32 | /* |
33 | * Warning on the terminology - Linux calls an uncached area coherent; | |
34 | * MIPS terminology calls memory areas with hardware maintained coherency | |
35 | * coherent. | |
36 | */ | |
37 | ||
9a88cbb5 RB |
38 | static inline int cpu_is_noncoherent_r10000(struct device *dev) |
39 | { | |
40 | return !plat_device_is_coherent(dev) && | |
10cc3529 RB |
41 | (current_cpu_type() == CPU_R10000 || |
42 | current_cpu_type() == CPU_R12000); | |
9a88cbb5 RB |
43 | } |
44 | ||
/*
 * Pick the GFP zone flag that matches the device's coherent DMA mask.
 *
 * Strips any caller-supplied zone specifiers, then selects __GFP_DMA /
 * __GFP_DMA32 / none depending on which zones the kernel was built with
 * and how much address space the device can reach.  Note that the
 * #ifdef'ed if/else chains below deliberately fall through to share the
 * single trailing "dma_flag = 0;" as their final else branch.
 */
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;	/* no device: assume ISA, 24-bit DMA */
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
	dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
81 | ||
/*
 * Allocate "noncoherent" DMA memory: plain cached, zeroed kernel pages
 * plus a device-visible handle from the platform mapping hook.  Callers
 * are responsible for explicit cache maintenance (dma_cache_sync())
 * around device accesses.  Returns NULL on allocation failure.
 */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
99 | ||
/*
 * dma_map_ops .alloc hook.  First tries any per-device coherent pool;
 * otherwise grabs zeroed pages and, on non-coherent platforms, writes
 * them back out of the caches and returns the uncached alias of the
 * buffer so CPU and device agree on its contents.
 */
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			/* flush the zeroed data, then hand back the
			   uncached alias of the buffer */
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
124 | ||
/*
 * Free memory obtained from dma_alloc_noncoherent().  The pointer is a
 * cached address, so no alias translation is needed before freeing.
 */
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);
133 | ||
48e1fd5a | 134 | static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, |
e8d51e54 | 135 | dma_addr_t dma_handle, struct dma_attrs *attrs) |
1da177e4 LT |
136 | { |
137 | unsigned long addr = (unsigned long) vaddr; | |
f8ac0425 YY |
138 | int order = get_order(size); |
139 | ||
140 | if (dma_release_from_coherent(dev, order, vaddr)) | |
141 | return; | |
1da177e4 | 142 | |
d3f634b9 | 143 | plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); |
11531ac2 | 144 | |
9a88cbb5 RB |
145 | if (!plat_device_is_coherent(dev)) |
146 | addr = CAC_ADDR(addr); | |
147 | ||
1da177e4 LT |
148 | free_pages(addr, get_order(size)); |
149 | } | |
150 | ||
/*
 * Perform the cache maintenance matching the DMA direction on a
 * kernel-virtual address range:
 *   DMA_TO_DEVICE     - write back, so the device reads current data
 *   DMA_FROM_DEVICE   - invalidate, so the CPU re-reads device data
 *   DMA_BIDIRECTIONAL - write back and invalidate
 * Any other direction is a caller bug.
 */
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}
171 | ||
e36863a5 DD |
172 | /* |
173 | * A single sg entry may refer to multiple physically contiguous | |
174 | * pages. But we still need to process highmem pages individually. | |
175 | * If highmem is not configured then the bulk of this loop gets | |
176 | * optimized out. | |
177 | */ | |
178 | static inline void __dma_sync(struct page *page, | |
179 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
180 | { | |
181 | size_t left = size; | |
182 | ||
183 | do { | |
184 | size_t len = left; | |
185 | ||
186 | if (PageHighMem(page)) { | |
187 | void *addr; | |
188 | ||
189 | if (offset + len > PAGE_SIZE) { | |
190 | if (offset >= PAGE_SIZE) { | |
191 | page += offset >> PAGE_SHIFT; | |
192 | offset &= ~PAGE_MASK; | |
193 | } | |
194 | len = PAGE_SIZE - offset; | |
195 | } | |
196 | ||
197 | addr = kmap_atomic(page); | |
198 | __dma_sync_virtual(addr + offset, len, direction); | |
199 | kunmap_atomic(addr); | |
200 | } else | |
201 | __dma_sync_virtual(page_address(page) + offset, | |
202 | size, direction); | |
203 | offset = 0; | |
204 | page++; | |
205 | left -= len; | |
206 | } while (left); | |
207 | } | |
208 | ||
/*
 * dma_map_ops .unmap_page hook.  R10000/R12000 may have speculatively
 * refilled cache lines while the DMA was in flight, so sync once more
 * before releasing the platform mapping.
 */
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
218 | ||
/*
 * dma_map_ops .map_sg hook.  For non-coherent devices each entry is
 * cache-synced first; every entry then gets its bus address filled in
 * via the platform translation hook.  Never fails: returns nents.
 */
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}
234 | ||
48e1fd5a DD |
235 | static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, |
236 | unsigned long offset, size_t size, enum dma_data_direction direction, | |
237 | struct dma_attrs *attrs) | |
1da177e4 | 238 | { |
48e1fd5a | 239 | if (!plat_device_is_coherent(dev)) |
e36863a5 | 240 | __dma_sync(page, offset, size, direction); |
1da177e4 | 241 | |
e36863a5 | 242 | return plat_map_dma_mem_page(dev, page) + offset; |
1da177e4 LT |
243 | } |
244 | ||
/*
 * dma_map_ops .unmap_sg hook.  On non-coherent devices, entries mapped
 * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL are synced so the CPU sees the
 * device-written data; DMA_TO_DEVICE needs no CPU-side work on unmap.
 */
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}
259 | ||
/*
 * dma_map_ops .sync_single_for_cpu hook.  Only the non-coherent
 * R10000/R12000 case needs cache work here; see
 * cpu_is_noncoherent_r10000().
 */
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
267 | ||
/*
 * dma_map_ops .sync_single_for_device hook: give the platform a chance
 * for extra work (e.g. write buffer flushing), then sync the caches for
 * any non-coherent device before the device touches the buffer.
 */
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
276 | ||
/*
 * dma_map_ops .sync_sg_for_cpu hook: per-entry equivalent of
 * mips_dma_sync_single_for_cpu().
 */
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}
289 | ||
/*
 * dma_map_ops .sync_sg_for_device hook: per-entry equivalent of
 * mips_dma_sync_single_for_device(), minus the platform extra-sync
 * (matching the original behavior of this file).
 */
static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}
302 | ||
/* dma_map_ops .mapping_error hook: delegated entirely to the platform. */
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}
307 | ||
/* dma_map_ops .dma_supported hook: delegated entirely to the platform. */
int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}
312 | ||
/*
 * Explicit cache maintenance for memory from dma_alloc_noncoherent().
 * A no-op on coherent devices apart from the platform extra-sync hook;
 * DMA_NONE is a caller bug.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);
324 | ||
/* Default MIPS dma_map_ops; platforms may override via mips_dma_map_ops. */
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

/* The ops pointer actually consulted by the DMA API on this arch. */
struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
342 | ||
/* Number of entries preallocated for the DMA-debug facility. */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

/* One-time init: set up DMA-debug bookkeeping.  Runs at fs_initcall time. */
static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);