#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
                size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
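
/*
 * Usage sketch (hypothetical driver probe code, not part of this header):
 * a device that can only drive the low 24 address bits, as in the example
 * above, would set its mask before creating any mappings.
 *
 *      if (dma_set_mask(dev, 0x00ffffff))
 *              return -EIO;
 */
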
static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * must not be made during or after this call.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
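
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * allocate a coherent buffer, use it for DMA, then release it.
 *
 *      dma_addr_t dma_handle;
 *      void *buf;
 *
 *      buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      (program the device with dma_handle; the CPU accesses buf directly)
 *      dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
 */
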
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
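
/*
 * Usage sketch (hypothetical driver mmap handler, not part of this header):
 * hand a previously allocated coherent buffer to user space.  my_dev,
 * my_cpu_addr, my_dma_handle and my_size are assumed to come from an
 * earlier dma_alloc_coherent() call kept by the driver.
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *                                       my_dma_handle, my_size);
 *      }
 */
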
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
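
/*
 * Usage sketch (hypothetical frame buffer driver, not part of this header):
 * writecombined memory suits buffers the CPU only fills and the device
 * only reads.  fb_virt, fb_dma and fb_size are assumed driver variables.
 *
 *      fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *      if (!fb_virt)
 *              return -ENOMEM;
 *      ...
 *      dma_free_writecombine(dev, fb_size, fb_virt, fb_dma);
 */
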
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
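
/*
 * Usage sketch (hypothetical platform code, not part of this header):
 * register a device with 512-byte and 4096-byte bounce buffer pools.
 *
 *      if (dmabounce_register_dev(dev, 512, 4096))
 *              dev_err(dev, "failed to register with dmabounce\n");
 */
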
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
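
/*
 * Implementation sketch (hypothetical platform code, not part of this
 * header): bounce anything falling outside a 64MB inbound window starting
 * at a platform-defined EXAMPLE_DMA_BASE (an assumed constant).
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *      {
 *              return addr < EXAMPLE_DMA_BASE ||
 *                     addr + size > EXAMPLE_DMA_BASE + SZ_64M;
 *      }
 */
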
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
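
/*
 * Usage sketch (hypothetical driver transmit path, not part of this header):
 * map a kernel buffer, let the device DMA from it, then unmap it.  buf and
 * len are assumed driver variables.
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      (start the transfer using dma and wait for it to complete)
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
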
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint_page(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
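
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * peek at a long-lived streaming mapping between transfers without
 * tearing it down.  dma and len are assumed driver variables.
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      (the CPU may now read the buffer contents)
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      (the device owns the buffer again)
 */
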
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
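
/*
 * Usage sketch (hypothetical driver code, not part of this header):
 * map a scatterlist and program one descriptor per mapped entry.
 * program_descriptor() is an assumed driver helper; sglist and nents
 * are assumed driver variables.
 *
 *      struct scatterlist *sg;
 *      int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 *      for_each_sg(sglist, sg, count, i)
 *              program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
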
#endif /* __KERNEL__ */
#endif