/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)

/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)

/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)

/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)

/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)

/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)

/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
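
/*
 * Example (illustrative, not part of the API): DMA_BIT_MASK(n) builds a
 * mask with the low n bits set, e.g. DMA_BIT_MASK(32) == 0xffffffffULL.
 * The n == 64 special case avoids the undefined behaviour of shifting a
 * 64-bit value by 64 bit positions:
 *
 *	u64 mask = DMA_BIT_MASK(24);	// 0x00ffffff, e.g. a 24-bit device
 */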

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB		(1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
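
/*
 * Example (illustrative sketch): every streaming mapping must be checked
 * with dma_mapping_error() before the dma_addr_t is handed to hardware;
 * never compare against DMA_MAPPING_ERROR directly. dev, page and len are
 * hypothetical driver state:
 *
 *	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, len,
 *					    DMA_TO_DEVICE, 0);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */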

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
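
/*
 * Example (illustrative sketch): the noncontiguous API allocates
 * device-addressable memory that need not be physically contiguous, and
 * can map it into the kernel on demand. dev and size are hypothetical:
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */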
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfers use the dma_iova_*() calls, or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
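
/*
 * Example (illustrative sketch, assuming an IOMMU-backed device): allocate
 * one contiguous IOVA range up front, link a physical range into it, then
 * tear everything down with a single call. dev, phys and len are
 * hypothetical driver state; some error handling is elided:
 *
 *	struct dma_iova_state state = {};
 *	int ret;
 *
 *	if (dma_iova_try_alloc(dev, &state, phys, len)) {
 *		ret = dma_iova_link(dev, &state, phys, 0, len,
 *				    DMA_TO_DEVICE, 0);
 *		if (ret) {
 *			dma_iova_free(dev, &state);
 *			return ret;
 *		}
 *		ret = dma_iova_sync(dev, &state, 0, len);
 *		...
 *		dma_iova_destroy(dev, &state, len, DMA_TO_DEVICE, 0);
 *	}
 */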
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
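
/*
 * Example (illustrative sketch): typical sg_table lifecycle for streaming
 * DMA with the helpers above. dev and sgt are hypothetical driver state:
 *
 *	int ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *
 *	if (ret)
 *		return ret;
 *	... device writes into the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... CPU reads the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	... more device DMA ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */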

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
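
/*
 * Example (illustrative sketch): single-buffer streaming DMA round trip
 * using the wrappers above. dev, buf and len are hypothetical; buf must
 * not point into vmalloc() memory:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand dma to the hardware and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */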

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
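
/*
 * Example (illustrative sketch): a coherent buffer shared with the device
 * for its whole lifetime, e.g. a descriptor ring. dev, ring, ring_dma and
 * RING_SIZE are hypothetical:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */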

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
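
/*
 * Example (illustrative sketch): typical probe()-time mask setup, falling
 * back from 64-bit to 32-bit addressing. dev is hypothetical:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */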

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL, a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
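
/*
 * Worked example (illustrative): with a 4 KiB IOMMU page size (page_shift
 * == 12), a device limited to 4 GiB segments (boundary mask 0xffffffff)
 * yields (0xffffffff >> 12) + 1 == 0x100000 pages per segment boundary.
 */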

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		\
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		\
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	\
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */
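
/*
 * Example (illustrative sketch): a driver embeds the unmap state in its
 * own descriptor so the same code compiles away to nothing when
 * CONFIG_NEED_DMA_MAP_STATE is not set. my_desc and desc are hypothetical:
 *
 *	struct my_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, dma);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */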

#endif /* _LINUX_DMA_MAPPING_H */