/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
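
/*
 * Illustrative sketch (not part of the upstream header): attributes are
 * passed as a bitmask through the *_attrs variants of the mapping and
 * allocation calls.  "dev", "buf" and "len" below are made-up names, and
 * a driver that passes DMA_ATTR_SKIP_CPU_SYNC then owns all cache
 * maintenance itself via the dma_sync_single_*() helpers.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_WARN);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single_attrs(dev, dma, len, DMA_TO_DEVICE,
 *			       DMA_ATTR_SKIP_CPU_SYNC);
 */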

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB		(1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

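/*
 * Illustrative sketch (not part of the upstream header): every streaming
 * mapping must be checked with dma_mapping_error() before the returned
 * handle is used, and unmapped with the same size and direction.  "dev",
 * "skb_data" and "len" are made-up names.
 *
 *	dma_addr_t dma = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand "dma" to the hardware, wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
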
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */

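/*
 * Illustrative sketch (not part of the upstream header): the IOVA API lets
 * IOMMU-aware callers allocate one contiguous IOVA range and link physical
 * ranges into it incrementally.  "dev", "phys" and "len" are made-up names,
 * and real callers fall back to the regular dma_map_*() interfaces when
 * dma_iova_try_alloc() fails or dma_use_iova() reports the API is unusable.
 *
 *	struct dma_iova_state state = {};
 *	size_t mapped = 0;
 *	int ret;
 *
 *	if (!dma_iova_try_alloc(dev, &state, phys, len))
 *		return -EOPNOTSUPP;	// caller falls back to dma_map_*()
 *
 *	ret = dma_iova_link(dev, &state, phys, 0, len, DMA_TO_DEVICE, 0);
 *	if (!ret) {
 *		mapped = len;
 *		ret = dma_iova_sync(dev, &state, 0, len);
 *	}
 *	if (ret) {
 *		dma_iova_destroy(dev, &state, mapped, DMA_TO_DEVICE, 0);
 *		return ret;
 *	}
 *	... program the device with state.addr ...
 *	dma_iova_destroy(dev, &state, len, DMA_TO_DEVICE, 0);
 */
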
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

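/*
 * Illustrative sketch (not part of the upstream header): for a long-lived
 * streaming mapping, ownership bounces between device and CPU with the
 * dma_sync_single_*() helpers instead of remapping every time.  "dev",
 * "dma" and "len" are made-up names.
 *
 *	... device has written into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... buffer handed back to the device for the next transfer ...
 */
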
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

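/*
 * Illustrative sketch (not part of the upstream header): non-coherent
 * allocations return ordinary cacheable memory, so the caller owns all
 * cache maintenance via the dma_sync_single_*() helpers.  "dev" and "ring"
 * are made-up names.
 *
 *	dma_addr_t dma;
 *	void *ring = dma_alloc_noncoherent(dev, SZ_4K, &dma,
 *					   DMA_BIDIRECTIONAL, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... fill the ring ...
 *	dma_sync_single_for_device(dev, dma, SZ_4K, DMA_BIDIRECTIONAL);
 *	...
 *	dma_free_noncoherent(dev, SZ_4K, ring, dma, DMA_BIDIRECTIONAL);
 */
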
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling the
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

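/*
 * Illustrative sketch (not part of the upstream header): typical sg_table
 * flow.  "dev", "sgt" (an already populated struct sg_table) and "dir" are
 * made-up names; error handling is abbreviated.
 *
 *	int ret = dma_map_sgtable(dev, sgt, dir, 0);
 *
 *	if (ret)
 *		return ret;
 *	... device works on the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, dir);
 *	... CPU may inspect or modify the data here ...
 *	dma_sync_sgtable_for_device(dev, sgt, dir);
 *	... device may access the buffer again ...
 *	dma_unmap_sgtable(dev, sgt, dir, 0);
 */
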
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

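/*
 * Illustrative sketch (not part of the upstream header): coherent memory is
 * typically used for descriptor rings that both CPU and device access
 * without explicit synchronization.  "dev", "struct my_desc", "NUM_DESC",
 * "ring" and "ring_dma" are made-up names.
 *
 *	dma_addr_t ring_dma;
 *	struct my_desc *ring = dma_alloc_coherent(dev, NUM_DESC * sizeof(*ring),
 *						  &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_dma, use "ring" from the CPU ...
 *	dma_free_coherent(dev, NUM_DESC * sizeof(*ring), ring, ring_dma);
 */
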
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

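/*
 * Illustrative sketch (not part of the upstream header): a typical probe()
 * asks for the widest mask the hardware supports and falls back to 32 bits.
 * "dev" is a made-up name.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return dev_err_probe(dev, -EIO, "no usable DMA mask\n");
 */
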
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

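/*
 * Illustrative worked example (not part of the upstream header): with a
 * boundary mask of 0xffffffff (a 4 GiB boundary) and an IOMMU page shift
 * of 12, dma_get_seg_boundary_nr_pages() returns
 * (0xffffffff >> 12) + 1 = 0x100000, i.e. the boundary expressed as
 * 1048576 IOMMU pages of 4 KiB each.
 */
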
static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

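/*
 * Illustrative sketch (not part of the upstream header): the dmam_*
 * variants are device-managed, so memory allocated in probe() is released
 * automatically on driver detach and needs no explicit free.  "dev",
 * "desc" and "desc_dma" are made-up names.
 *
 *	void *desc = dmam_alloc_coherent(dev, SZ_4K, &desc_dma, GFP_KERNEL);
 *
 *	if (!desc)
 *		return -ENOMEM;
 */
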
static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

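/*
 * Illustrative sketch (not part of the upstream header): write-combined
 * allocations are common for frame buffers that the CPU fills and the
 * device scans out, and dma_mmap_wc() exports the same buffer to user
 * space from an mmap() handler.  "dev", "fb", "fb_dma", "fb_size" and
 * "vma" are made-up names.
 *
 *	void *fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	return dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
 */
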
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		\
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		\
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	\
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif

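/*
 * Illustrative sketch (not part of the upstream header): drivers embed the
 * unmap bookkeeping in their own per-buffer structure so that it compiles
 * away when CONFIG_NEED_DMA_MAP_STATE is not set.  "struct my_tx_buf",
 * "buf", "dma" and "size" are made-up names.
 *
 *	struct my_tx_buf {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	After mapping:
 *		dma_unmap_addr_set(buf, addr, dma);
 *		dma_unmap_len_set(buf, len, size);
 *
 *	When unmapping:
 *		dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *				 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */
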
#endif /* _LINUX_DMA_MAPPING_H */