Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _X8664_DMA_MAPPING_H |
2 | #define _X8664_DMA_MAPPING_H 1 | |
3 | ||
4 | /* | |
5 | * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for | |
6 | * documentation. | |
7 | */ | |
8 | ||
9 | #include <linux/config.h> | |
10 | ||
11 | #include <asm/scatterlist.h> | |
12 | #include <asm/io.h> | |
13 | #include <asm/swiotlb.h> | |
14 | ||
15 | extern dma_addr_t bad_dma_address; | |
16 | #define dma_mapping_error(x) \ | |
17 | (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address)) | |
18 | ||
19 | void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
20 | unsigned gfp); | |
21 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
22 | dma_addr_t dma_handle); | |
23 | ||
24 | #ifdef CONFIG_GART_IOMMU | |
25 | ||
26 | extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, | |
27 | int direction); | |
28 | extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, | |
29 | int direction); | |
30 | ||
31 | #else | |
32 | ||
33 | /* No IOMMU */ | |
34 | ||
35 | static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, | |
36 | size_t size, int direction) | |
37 | { | |
38 | dma_addr_t addr; | |
39 | ||
40 | if (direction == DMA_NONE) | |
41 | out_of_line_bug(); | |
42 | addr = virt_to_bus(ptr); | |
43 | ||
44 | if ((addr+size) & ~*hwdev->dma_mask) | |
45 | out_of_line_bug(); | |
46 | return addr; | |
47 | } | |
48 | ||
49 | static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr, | |
50 | size_t size, int direction) | |
51 | { | |
52 | if (direction == DMA_NONE) | |
53 | out_of_line_bug(); | |
54 | /* Nothing to do */ | |
55 | } | |
56 | ||
57 | #endif | |
58 | ||
/*
 * Page-based mapping is just the single-buffer mapping applied to the
 * page's kernel-virtual address plus @offset.
 */
#define dma_map_page(dev,page,offset,size,dir) \
	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
61 | ||
62 | static inline void dma_sync_single_for_cpu(struct device *hwdev, | |
63 | dma_addr_t dma_handle, | |
64 | size_t size, int direction) | |
65 | { | |
66 | if (direction == DMA_NONE) | |
67 | out_of_line_bug(); | |
68 | ||
69 | if (swiotlb) | |
70 | return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction); | |
71 | ||
72 | flush_write_buffers(); | |
73 | } | |
74 | ||
75 | static inline void dma_sync_single_for_device(struct device *hwdev, | |
76 | dma_addr_t dma_handle, | |
77 | size_t size, int direction) | |
78 | { | |
79 | if (direction == DMA_NONE) | |
80 | out_of_line_bug(); | |
81 | ||
82 | if (swiotlb) | |
83 | return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction); | |
84 | ||
85 | flush_write_buffers(); | |
86 | } | |
87 | ||
27183ebd AK |
/*
 * Partial-buffer sync variants.  NOTE(review): @offset is dropped and
 * the sync starts at @dma_handle for @size bytes — presumably fine for
 * the coherent path where the flush is global, but verify against the
 * swiotlb bounce-buffer path before relying on sub-range semantics.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_device(dev, dma_handle, size, dir)
92 | ||
1da177e4 LT |
93 | static inline void dma_sync_sg_for_cpu(struct device *hwdev, |
94 | struct scatterlist *sg, | |
95 | int nelems, int direction) | |
96 | { | |
97 | if (direction == DMA_NONE) | |
98 | out_of_line_bug(); | |
99 | ||
100 | if (swiotlb) | |
101 | return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction); | |
102 | ||
103 | flush_write_buffers(); | |
104 | } | |
105 | ||
106 | static inline void dma_sync_sg_for_device(struct device *hwdev, | |
107 | struct scatterlist *sg, | |
108 | int nelems, int direction) | |
109 | { | |
110 | if (direction == DMA_NONE) | |
111 | out_of_line_bug(); | |
112 | ||
113 | if (swiotlb) | |
114 | return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction); | |
115 | ||
116 | flush_write_buffers(); | |
117 | } | |
118 | ||
119 | extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg, | |
120 | int nents, int direction); | |
121 | extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, | |
122 | int nents, int direction); | |
123 | ||
124 | #define dma_unmap_page dma_unmap_single | |
125 | ||
126 | extern int dma_supported(struct device *hwdev, u64 mask); | |
127 | extern int dma_get_cache_alignment(void); | |
128 | #define dma_is_consistent(h) 1 | |
129 | ||
130 | static inline int dma_set_mask(struct device *dev, u64 mask) | |
131 | { | |
132 | if (!dev->dma_mask || !dma_supported(dev, mask)) | |
133 | return -EIO; | |
134 | *dev->dma_mask = mask; | |
135 | return 0; | |
136 | } | |
137 | ||
138 | static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) | |
139 | { | |
140 | flush_write_buffers(); | |
141 | } | |
142 | ||
143 | #endif |