#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
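
/*
 * Illustrative sketch, not part of this header: each IOMMU backend
 * (nommu, swiotlb, GART, ...) fills in one of these tables and installs
 * it as the global "dma_ops" below, or per device via
 * dev->archdata.dma_ops.  The "example_*" names here are hypothetical;
 * see arch/x86/kernel/pci-nommu.c for a real implementation:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */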

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
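
/*
 * Usage sketch (hypothetical driver code): the typical streaming-DMA
 * lifecycle with the helpers above, for a kmalloc'ed buffer "buf" of
 * "len" bytes used by device "dev":
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... hand "handle" to the device and run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */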

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
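
/*
 * Scatter-gather sketch (hypothetical names): "sglist" holds "count"
 * entries set up with sg_init_table()/sg_set_page().  Note that
 * dma_unmap_sg() takes the original entry count, not the possibly
 * smaller value returned by dma_map_sg() when the IOMMU merged entries:
 *
 *	struct scatterlist *s;
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(dev, sglist, count, DMA_FROM_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	for_each_sg(sglist, s, mapped, i)
 *		write_hw_desc(dev, sg_dma_address(s), sg_dma_len(s));
 *	dma_unmap_sg(dev, sglist, count, DMA_FROM_DEVICE);
 *
 * "write_hw_desc" stands in for whatever programs the device.
 */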

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
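
/*
 * Ownership sketch for a long-lived streaming mapping (hypothetical
 * names): sync to the CPU before reading data the device wrote, then
 * back to the device before it may touch the buffer again:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into it again ...
 */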

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}
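
/*
 * The *_range variants sync only part of a mapping.  Hypothetical
 * example: the device wrote a "hdr_len"-byte header at the start of a
 * large receive buffer, so only that prefix must be made CPU-visible:
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, hdr_len,
 *				      DMA_FROM_DEVICE);
 */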

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
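
/*
 * Page-mapping sketch (hypothetical names): map a fragment of a page,
 * e.g. part of a network receive page, without needing a kernel virtual
 * address for it:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, offset, frag_len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, handle, frag_len, DMA_TO_DEVICE);
 */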

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h)	(1)

#include <asm-generic/dma-coherent.h>
#endif