#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			   int direction);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* Fix when needed. I really don't know of any limitations. */
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
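
/*
 * Example (illustrative sketch, not part of this header; "pdev" is a
 * hypothetical platform device): a driver for a device that can only
 * drive the low 24 address bits might restrict its mask at probe time:
 *
 *	if (dma_set_mask(&pdev->dev, 0x00ffffff))
 *		dev_warn(&pdev->dev, "no suitable DMA available\n");
 */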

/*
 * dma_map_single can't fail as it is implemented now.
 */
static inline int dma_mapping_error(dma_addr_t addr)
{
	return 0;
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages and returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * Referencing the memory or mappings associated with @cpu_addr/@handle
 * during or after this call is illegal.
 */
extern void dma_free_coherent(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t handle);

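/*
 * Example (illustrative sketch; "mydev" and the descriptor-ring use
 * case are hypothetical): a typical coherent allocation, matched with
 * its free:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(mydev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_dma, use "ring" from the CPU ...
 *	dma_free_coherent(mydev, PAGE_SIZE, ring, ring_dma);
 */
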
/**
 * dma_alloc_writecombine - allocate write-combining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages and returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_writecombine(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp);

/**
 * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_writecombine
 * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
 * @handle: device-view address returned from dma_alloc_writecombine
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_writecombine().
 *
 * Referencing the memory or mappings associated with @cpu_addr/@handle
 * during or after this call is illegal.
 */
extern void dma_free_writecombine(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t handle);

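/*
 * Example (illustrative sketch; "mydev", "fb_size" and the frame-buffer
 * use case are hypothetical): write-combining memory suits buffers the
 * CPU mostly writes sequentially, such as an LCD frame buffer:
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(mydev, fb_size, &fb_dma,
 *					  GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	... point the LCD controller at fb_dma, render through "fb" ...
 *	dma_free_writecombine(mydev, fb_size, fb, fb_dma);
 */
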
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct-mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	dma_cache_sync(dev, cpu_addr, size, direction);
	return virt_to_bus(cpu_addr);
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	/* Nothing to do: the cache was already synced at map time. */
}

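/*
 * Example (illustrative sketch; "mydev", "buf" and "len" are
 * hypothetical): a full streaming cycle for a device-to-memory
 * transfer, including the dma_mapping_error() check (a no-op here, but
 * portable drivers should still perform it):
 *
 *	dma_addr_t dma = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 *	... start the transfer, wait for completion ...
 *	dma_unmap_single(mydev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the data in "buf" ...
 */
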
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	return dma_map_single(dev, page_address(page) + offset,
			      size, direction);
}

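/*
 * Example (illustrative sketch; "mydev" and "page" are hypothetical):
 * mapping the first half of a page for a memory-to-device transfer:
 *
 *	dma_addr_t dma = dma_map_page(mydev, page, 0, PAGE_SIZE / 2,
 *				      DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(mydev, dma, PAGE_SIZE / 2, DMA_TO_DEVICE);
 */
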
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * dma_map_single() interface above. Here the scatter-gather list
 * elements are each tagged with the appropriate DMA address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++) {
		char *virt;

		sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
		virt = sg_virt(&sg[i]);
		dma_cache_sync(dev, virt, sg[i].length, direction);
	}

	return nents;
}

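/*
 * Example (illustrative sketch; "mydev", "sgl", NENTS and
 * program_descriptor() are hypothetical): mapping a scatterlist and
 * handing each resulting address/length pair to the device:
 *
 *	int i, count = dma_map_sg(mydev, sgl, NENTS, DMA_TO_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		program_descriptor(sg_dma_address(&sgl[i]),
 *				   sg_dma_len(&sgl[i]));
 */
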
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	/* Nothing to do: the cache was already synced at map time. */
}

/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so. At the point you next
 * give the DMA address back to the card, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	/*
	 * No need to do anything since the CPU isn't supposed to
	 * touch this memory after we flushed it at mapping- or
	 * sync-for-device time.
	 */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

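/*
 * Example (illustrative sketch; "mydev", "dma", "buf" and "len" are
 * hypothetical): peeking at a mapped buffer between transfers without
 * unmapping it:
 *
 *	dma_sync_single_for_cpu(mydev, dma, len, DMA_FROM_DEVICE);
 *	... examine "buf" with the CPU ...
 *	dma_sync_single_for_device(mydev, dma, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */
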
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything; that's all the PCI API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything; that's all the PCI API can do */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction direction)
{
	/*
	 * No need to do anything since the CPU isn't supposed to
	 * touch this memory after we flushed it at mapping- or
	 * sync-for-device time.
	 */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
}

/* Now for the API extensions over the pci_* API */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return 1;
}

static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.dcache.linesz;
}
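
/*
 * Example (illustrative sketch; "len" is hypothetical): rounding a
 * streaming buffer size up to the D-cache line size so that no other
 * data shares a cache line with the DMA buffer:
 *
 *	size_t aligned = ALIGN(len, dma_get_cache_alignment());
 */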

#endif /* __ASM_AVR32_DMA_MAPPING_H */