/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation. Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
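
/*
 * Worked example of the default translation above (illustrative only,
 * with made-up numbers): a device with dev->dma_pfn_offset == 0x80000
 * and PAGE_SHIFT == 12 subtracts 0x80000 << 12 == 0x80000000 from every
 * CPU physical address, so
 *
 *	__phys_to_dma(dev, 0x100000000) == 0x80000000
 *	__dma_to_phys(dev, 0x80000000)  == 0x100000000
 *
 * i.e. the two helpers are exact inverses of each other.
 */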

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
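
/*
 * Usage sketch (not part of this header): callers that hand an address to
 * a device go through the wrapped helpers so that any memory-encryption
 * bit is handled for them, e.g.
 *
 *	dma_addr_t daddr = phys_to_dma(dev, page_to_phys(page));
 *	phys_addr_t back = dma_to_phys(dev, daddr);
 *
 * where back equals page_to_phys(page) again. Only code that knows its
 * buffer is unencrypted, such as the coherent allocator, should call
 * __phys_to_dma() / __dma_to_phys() directly.
 */
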
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
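
/*
 * Example (illustrative sketch, not from this file): a device with a
 * 32-bit mask and no separate bus limit fails the check for any mapping
 * whose last byte sits above 4 GiB:
 *
 *	*dev->dma_mask == DMA_BIT_MASK(32), dev->bus_dma_limit == 0
 *	dma_capable(dev, 0xfffff000, 0x2000, true) returns false
 *
 * because end == 0x100000fff exceeds the 0xffffffff mask; such a mapping
 * must instead be bounced through swiotlb (see dma_direct_map_page below).
 */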

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
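
/*
 * Note the deliberately mirrored ordering in the two helpers above:
 * for_device copies into any swiotlb bounce buffer first and then writes
 * the CPU cache state out to memory, while for_cpu invalidates the CPU
 * caches first and only then copies the bounce buffer back, so data
 * always moves in the direction of the upcoming access.
 */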

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
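
/*
 * Usage sketch (illustrative only): these inlines are not called by
 * drivers directly; the dma-mapping core dispatches to them when no
 * dma_map_ops instance is set for the device, roughly:
 *
 *	dma_addr_t dma = dma_direct_map_page(dev, page, 0, len,
 *					     DMA_TO_DEVICE, 0);
 *	if (dma != DMA_MAPPING_ERROR) {
 *		... start the device transfer ...
 *		dma_direct_unmap_page(dev, dma, len, DMA_TO_DEVICE, 0);
 *	}
 *
 * with the DMA_ATTR_* flags from the driver passed through as attrs.
 */
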
#endif /* _LINUX_DMA_DIRECT_H */