#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
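/*
 * struct dma_map_ops is the table of operations through which the generic
 * DMA-mapping API dispatches to an architecture- or bus-specific
 * implementation (typically selected via get_dma_ops() in the arch's
 * <asm/dma-mapping.h>).
 */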
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
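/*
 * A minimal sketch of how an implementation might publish its operations;
 * the my_arch_* names are hypothetical and not part of this header:
 *
 *	static struct dma_map_ops my_arch_dma_ops = {
 *		.alloc		= my_arch_alloc_coherent,
 *		.free		= my_arch_free_coherent,
 *		.map_page	= my_arch_map_page,
 *		.unmap_page	= my_arch_unmap_page,
 *		.map_sg		= my_arch_map_sg,
 *		.unmap_sg	= my_arch_unmap_sg,
 *	};
 */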
69 | ||
8f286c33 | 70 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
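/*
 * For example, DMA_BIT_MASK(32) is 0x00000000ffffffffULL and DMA_BIT_MASK(24)
 * is 0x0000000000ffffffULL.  The (n) == 64 case is special-cased because
 * shifting a 64-bit value by 64 bits is undefined behaviour in C.
 */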

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
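/*
 * Typical probe-time usage (a sketch; the 64-bit-first fallback pattern is
 * common driver practice, not something this header mandates):
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */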
124 | ||
fa6a8d6d RK |
125 | /* |
126 | * Similar to the above, except it deals with the case where the device | |
127 | * does not have dev->dma_mask appropriately setup. | |
128 | */ | |
129 | static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) | |
130 | { | |
131 | dev->dma_mask = &dev->coherent_dma_mask; | |
132 | return dma_set_mask_and_coherent(dev, mask); | |
133 | } | |
134 | ||
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev,
				       unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
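/*
 * Example (sketch): a device whose DMA engine cannot cross a 64 KiB
 * boundary and cannot transfer more than 0x1000 bytes per segment might
 * set, at probe time:
 *
 *	dma_set_seg_boundary(dev, 0xffff);
 *	dma_set_max_seg_size(dev, 0x1000);
 *
 * Both calls require dev->dma_parms to have been set up by the bus code.
 */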
179 | ||
00c8f162 SS |
180 | #ifndef dma_max_pfn |
181 | static inline unsigned long dma_max_pfn(struct device *dev) | |
182 | { | |
183 | return *dev->dma_mask >> PAGE_SHIFT; | |
184 | } | |
185 | #endif | |
186 | ||
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
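/*
 * Example (sketch; ring, ring_dma and RING_BYTES are hypothetical names):
 *
 *	ring = dma_zalloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */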
194 | ||
e259f191 | 195 | #ifdef CONFIG_HAS_DMA |
4565f017 FT |
196 | static inline int dma_get_cache_alignment(void) |
197 | { | |
198 | #ifdef ARCH_DMA_MINALIGN | |
199 | return ARCH_DMA_MINALIGN; | |
200 | #endif | |
201 | return 1; | |
202 | } | |
e259f191 | 203 | #endif |
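/*
 * dma_get_cache_alignment() gives the alignment that buffers used for
 * streaming DMA should honour so a buffer never shares a cache line with
 * unrelated data; on architectures that define ARCH_DMA_MINALIGN,
 * kmalloc() typically already returns memory aligned at least this much.
 */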

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
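/*
 * The dmam_* variants are device-managed (devres): anything still
 * allocated when the driver detaches is released automatically, so a
 * probe() sketch needs no matching free on its error paths
 * (BUF_BYTES, buf and buf_dma are hypothetical names):
 *
 *	buf = dmam_alloc_coherent(dev, BUF_BYTES, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */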

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)

#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_map_sg(dev, sgl, nents, dir)

#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
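/*
 * Example (sketch; fb, fb_dma and FB_BYTES are hypothetical): a frame
 * buffer is a typical write-combine user, since the CPU mostly streams
 * writes into it:
 *
 *	fb = dma_alloc_writecombine(dev, FB_BYTES, &fb_dma, GFP_KERNEL);
 *	...
 *	dma_free_writecombine(dev, FB_BYTES, fb, fb_dma);
 */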
#endif /* CONFIG_HAVE_DMA_ATTRS */

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
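/*
 * Example (sketch; struct my_desc and its users are hypothetical):
 * embedding the unmap state in a driver structure compiles away to
 * nothing when CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *	struct my_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, mapping);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */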
320 | ||
9ac7849e | 321 | #endif |