/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
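/*
 * Worked example (editor's illustration, not part of the original header):
 * DMA_BIT_MASK(32) evaluates to ((1ULL << 32) - 1) == 0xffffffff, i.e. the
 * highest address a 32-bit capable device can reach.  The (n) == 64 special
 * case avoids the undefined behaviour of shifting a 64-bit value by 64 bits.
 */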
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
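/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * every streaming mapping must be checked with dma_mapping_error() before
 * the returned dma_addr_t is handed to the device.
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;		// busaddr is invalid, do not use it
 */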
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
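/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * typical lifecycle of a noncontiguous allocation.  'sgt' and 'vaddr' are
 * hypothetical local names.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	// ... use vaddr from the CPU, program the device via sgt ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */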
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
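/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a driver on a cache-coherent device can cache the dma_need_sync() result
 * once per buffer and skip the sync calls in its hot path.  'rx' and its
 * fields are hypothetical.
 *
 *	rx->needs_sync = dma_need_sync(dev, rx->dma_addr);
 *	...
 *	if (rx->needs_sync)
 *		dma_sync_single_for_cpu(dev, rx->dma_addr, len,
 *					DMA_FROM_DEVICE);
 */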
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
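/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * noncoherent memory must be explicitly synced around device accesses.
 * 'buf' and 'handle' are hypothetical.
 *
 *	void *buf = dma_alloc_noncoherent(dev, size, &handle,
 *					  DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *	// the CPU may now safely read buf
 *	dma_free_noncoherent(dev, size, buf, handle, DMA_FROM_DEVICE);
 */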
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
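/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * after a device writes a short packet into a larger mapped buffer, only
 * the bytes it actually wrote need to be made visible to the CPU.
 * 'buf_dma' and 'pkt_len' are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, buf_dma, 0, pkt_len,
 *				      DMA_FROM_DEVICE);
 *	// the CPU parses the packet here
 *	dma_sync_single_range_for_device(dev, buf_dma, 0, pkt_len,
 *					 DMA_FROM_DEVICE);
 */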
/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
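/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical sg_table lifecycle using the helpers above.
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0))
 *		return -EIO;
 *	// ... device fills the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	// ... CPU reads the buffer ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */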
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
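/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a descriptor ring that both the CPU and the device access concurrently is
 * the classic coherent allocation.  'ring' and 'ring_dma' are hypothetical.
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... program ring_dma into the device, use 'ring' from the CPU ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */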
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
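/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * probe code commonly tries a wide mask first and falls back to 32 bits.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 */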
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}
static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}
/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
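/*
 * Worked example (editor's addition, not part of the original header): with
 * a segment boundary mask of 0xffffffff and a 4 KiB IOMMU page size
 * (page_shift == 12), the result is (0xffffffff >> 12) + 1 == 0x100000
 * pages, i.e. 4 GiB worth of IOMMU pages.
 */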
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}
static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}
#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}
static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif /* CONFIG_NEED_DMA_MAP_STATE */
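/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * these macros let a driver store unmap information only on platforms that
 * actually need it.  The struct and field names are hypothetical.
 *
 *	struct ring_entry {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, addr, mapping);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, addr),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */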
#endif /* _LINUX_DMA_MAPPING_H */