#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>

#define dma_alloc_coherent	platform_dma_alloc_coherent
#define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
#define dma_free_coherent	platform_dma_free_coherent
#define dma_free_noncoherent	platform_dma_free_coherent
#define dma_map_single		platform_dma_map_single
#define dma_map_sg		platform_dma_map_sg
#define dma_unmap_single	platform_dma_unmap_single
#define dma_unmap_sg		platform_dma_unmap_sg
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error
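
/*
 * Each dma_* operation above expands to a machine-vector hook (see
 * <asm/machvec.h>).  On a generic kernel these typically dispatch
 * through ia64_mv at runtime; machvec-specific builds resolve them
 * to a concrete implementation at compile time.
 */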

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
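
/*
 * Implementing dma_map_page() via dma_map_single() relies on every
 * page having a kernel virtual address: ia64 has no highmem, so
 * page_address() is always valid here.
 */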

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
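
/*
 * Note that the *_range variants discard the offset and sync from
 * the start of the mapping for the given size.
 */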

#define dma_supported		platform_dma_supported

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
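
/*
 * Example (hypothetical driver code): a device limited to 32-bit DMA
 * addressing would restrict its mask before issuing any mappings:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 */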

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
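
/*
 * dma_cache_sync() is documented (Documentation/DMA-API.txt) for
 * memory obtained from dma_alloc_noncoherent(); since that is plain
 * coherent memory on ia64, the barrier above is all that is needed.
 */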

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */