// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

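/*
 * Worked example (not from this file): with the default of 24 bits,
 * DMA_BIT_MASK(ARCH_ZONE_DMA_BITS) is 0x00ffffff, i.e. the first 16 MiB of
 * physical address space.  Since the #ifndef above only supplies a default,
 * an architecture whose ZONE_DMA covers a different span could presumably
 * provide its own value before this file is built, for instance:
 *
 *	#define ARCH_ZONE_DMA_BITS 30
 *
 * which would make the allocator treat coherent masks up to 1 GiB as
 * requiring GFP_DMA.
 */
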
c10f07aa CH |
24 | /* |
25 | * For AMD SEV all DMA must be to unencrypted addresses. | |
26 | */ | |
27 | static inline bool force_dma_unencrypted(void) | |
28 | { | |
29 | return sev_active(); | |
30 | } | |
31 | ||
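/*
 * Background sketch (an assumption based on <linux/dma-direct.h> of this
 * era): phys_to_dma() may set the memory-encryption bit in the returned
 * address, while __phys_to_dma() leaves it clear.  The helpers below
 * therefore pick the variant matching the encryption state they need,
 * roughly:
 *
 *	dma_addr_t handle = force_dma_unencrypted() ?
 *		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
 */
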
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

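/*
 * Illustrative sketch (an assumption, mirroring what dma_capable() checks
 * for a device without a tighter bus limit): the helper above essentially
 * verifies that the whole buffer fits under the device's streaming mask,
 * i.e.
 *
 *	dma_addr + size - 1 <= *dev->dma_mask
 *
 * e.g. a device with a 32-bit mask fails for dma_addr = 0xfffff000 and
 * size = 0x2000, since the end address 0x100000fff needs 33 bits.
 */
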
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}

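/*
 * Worked example (hypothetical numbers, identity phys-to-DMA translation
 * assumed): for a device with coherent_dma_mask = DMA_BIT_MASK(32), a page
 * at phys = 0x100000000 fails dma_coherent_ok() because that address
 * already needs 33 bits, so dma_direct_alloc() below retries from a lower
 * zone instead of handing the page to the device.
 */
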
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in a context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

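/*
 * Usage sketch (hypothetical driver code; callers normally go through the
 * generic DMA API, which dispatches here via dma_direct_ops below, rather
 * than calling dma_direct_alloc() directly):
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *
 * For a device with coherent_dma_mask = DMA_BIT_MASK(24) the code above
 * ORs in GFP_DMA up front; a 32-bit mask gets GFP_DMA32 instead, and only
 * if the resulting page still fails dma_coherent_ok() does the
 * "goto again" fallback walk down to a more restrictive zone.
 */
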
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

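/*
 * Matching usage sketch (hypothetical; pairs with the allocation example
 * above): a buffer obtained through the coherent API is returned the same
 * way, which re-encrypts the pages first when SEV forced them decrypted:
 *
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 */
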
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

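/*
 * Usage sketch (hypothetical driver code): streaming mappings reach this
 * helper through dma_map_single()/dma_map_page(), and failures must be
 * checked with dma_mapping_error(), which lands in
 * dma_direct_mapping_error() below:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */
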
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

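/*
 * Usage sketch (hypothetical): a caller maps a scatterlist through
 * dma_map_sg(), treats a zero return as total failure, and then walks the
 * mapped entries:
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!mapped)
 *		return -EIO;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *
 * program_hw() is a made-up stand-in for whatever the device needs.
 */
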
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
		return 0;
#endif
	/*
	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
	 * as many DMA address bits as the device itself supports.
	 */
	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
		return 0;
	return 1;
}

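/*
 * Worked example (hypothetical numbers, identity phys-to-DMA translation
 * assumed, CONFIG_ZONE_DMA enabled): a device advertising a 30-bit mask
 * passes the first check since 0x3fffffff >= DMA_BIT_MASK(24) = 0xffffff,
 * but the same device behind a bridge with bus_dma_mask = DMA_BIT_MASK(28)
 * is rejected because 0x3fffffff > 0xfffffff.
 */
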
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
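
/*
 * Hook-up sketch (hypothetical; the exact wiring is architecture-specific):
 * a platform that wants the direct mapping for a device could install these
 * ops explicitly, after which the generic dma_map_* helpers dispatch here:
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */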