Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
a8463d4b | 2 | /* |
bc3ec75d CH | 3 | * Copyright (C) 2018 Christoph Hellwig. |
| 4 | * |
| 5 | * DMA operations that map physical memory directly without using an IOMMU. |
a8463d4b | 6 | */ |
a20bb058 | 7 | #include <linux/bootmem.h> /* for max_pfn */ |
a8463d4b CB | 8 | #include <linux/export.h> |
| 9 | #include <linux/mm.h> |
2e86a047 | 10 | #include <linux/dma-direct.h> |
a8463d4b | 11 | #include <linux/scatterlist.h> |
080321d3 | 12 | #include <linux/dma-contiguous.h> |
bc3ec75d | 13 | #include <linux/dma-noncoherent.h> |
25f1e188 | 14 | #include <linux/pfn.h> |
c10f07aa | 15 | #include <linux/set_memory.h> |
a8463d4b | 16 | |
27975969 CH | 17 | #define DIRECT_MAPPING_ERROR 0 |
| 18 | |
c61e9637 CH | 19 | /* |
| 20 | * Most architectures use ZONE_DMA for the first 16 Megabytes, but |
| 21 | * some use it for entirely different regions: |
| 22 | */ |
| 23 | #ifndef ARCH_ZONE_DMA_BITS |
| 24 | #define ARCH_ZONE_DMA_BITS 24 |
| 25 | #endif |
| 26 | |
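For readers unfamiliar with the mask macro, here is a small illustrative snippet (not part of the file) showing what the 24-bit default amounts to; it assumes the standard DMA_BIT_MASK() definition from <linux/dma-mapping.h>.

```c
#include <linux/dma-mapping.h>

/* Illustration only, not part of direct.c: DMA_BIT_MASK(n) is
 * ((1ULL << n) - 1) for n < 64, so the default 24-bit limit is
 *   DMA_BIT_MASK(24) == 0x0000000000ffffff == 16 MiB - 1
 * i.e. ZONE_DMA covers the first 16 MiB of physical memory by default. */
static inline u64 example_default_zone_dma_limit(void)
{
	return DMA_BIT_MASK(24);	/* 0x00ffffff */
}
```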
c10f07aa CH | 27 | /* |
| 28 | * For AMD SEV all DMA must be to unencrypted addresses. |
| 29 | */ |
| 30 | static inline bool force_dma_unencrypted(void) |
| 31 | { |
| 32 | return sev_active(); |
| 33 | } |
| 34 | |
27975969 CH | 35 | static bool |
| 36 | check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, |
| 37 | const char *caller) |
| 38 | { |
| 39 | if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { |
2550bbfd CH | 40 | if (!dev->dma_mask) { |
| 41 | dev_err(dev, |
| 42 | "%s: call on device without dma_mask\n", |
| 43 | caller); |
| 44 | return false; |
| 45 | } |
| 46 | |
b4ebe606 | 47 | if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) { |
27975969 | 48 | dev_err(dev, |
b4ebe606 CH | 49 | "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n", |
| 50 | caller, &dma_addr, size, |
| 51 | *dev->dma_mask, dev->bus_dma_mask); |
27975969 CH | 52 | } |
| 53 | return false; |
| 54 | } |
| 55 | return true; |
| 56 | } |
| 57 | |
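check_addr() builds on dma_capable() from <linux/dma-direct.h>; a simplified sketch of that check for this kernel generation is shown below, for orientation only (the header is authoritative).

```c
/* Simplified sketch of dma_capable() as used above: a mapping is
 * acceptable if its last byte fits under both the device DMA mask and,
 * when one is set, the bus DMA mask. */
static inline bool dma_capable_sketch(struct device *dev, dma_addr_t addr,
		size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
```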
a20bb058 CH | 58 | static inline dma_addr_t phys_to_dma_direct(struct device *dev, |
| 59 | phys_addr_t phys) |
| 60 | { |
| 61 | if (force_dma_unencrypted()) |
| 62 | return __phys_to_dma(dev, phys); |
| 63 | return phys_to_dma(dev, phys); |
| 64 | } |
| 65 | |
| 66 | u64 dma_direct_get_required_mask(struct device *dev) |
| 67 | { |
| 68 | u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); |
| 69 | |
b4ebe606 CH | 70 | if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma) |
| 71 | max_dma = dev->bus_dma_mask; |
| 72 | |
a20bb058 CH | 73 | return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; |
| 74 | } |
| 75 | |
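The return expression rounds the highest reachable DMA address up to a mask covering a whole number of address bits; a small worked example with an illustrative value (not part of the file):

```c
/* Worked example for the expression above:
 * if the last DMA-able address is max_dma = 0x1_2345_6789 then
 *   fls64(max_dma)      == 33   (highest set bit is bit 32)
 *   1ULL << (33 - 1)    == 0x1_0000_0000
 *   ... * 2 - 1         == 0x1_ffff_ffff
 * so the required mask covers 33 address bits. */
static u64 example_required_mask(void)
{
	u64 max_dma = 0x123456789ULL;

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;	/* 0x1ffffffff */
}
```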
7d21ee4c CH | 76 | static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, |
| 77 | u64 *phys_mask) |
| 78 | { |
b4ebe606 CH | 79 | if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask) |
| 80 | dma_mask = dev->bus_dma_mask; |
| 81 | |
7d21ee4c CH | 82 | if (force_dma_unencrypted()) |
| 83 | *phys_mask = __dma_to_phys(dev, dma_mask); |
| 84 | else |
| 85 | *phys_mask = dma_to_phys(dev, dma_mask); |
| 86 | |
| 87 | /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ |
| 88 | if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) |
| 89 | return GFP_DMA; |
| 90 | if (*phys_mask <= DMA_BIT_MASK(32)) |
| 91 | return GFP_DMA32; |
| 92 | return 0; |
| 93 | } |
| 94 | |
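To make the zone selection concrete, here is a hypothetical illustration of the helper's behaviour, assuming phys_to_dma() is an identity mapping and the default ARCH_ZONE_DMA_BITS of 24 (illustration only, not part of the file):

```c
/* Hypothetical illustration: the helper picks the lowest zone whose
 * physical limit can still satisfy the mask:
 *   DMA_BIT_MASK(24) -> GFP_DMA    (needs ZONE_DMA)
 *   DMA_BIT_MASK(32) -> GFP_DMA32  (needs ZONE_DMA32)
 *   DMA_BIT_MASK(64) -> 0          (any zone will do) */
static gfp_t example_zone_choice(struct device *dev)
{
	u64 phys_mask;

	return __dma_direct_optimal_gfp_mask(dev, DMA_BIT_MASK(32),
			&phys_mask);	/* GFP_DMA32 under the assumptions above */
}
```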
95f18391 CH | 95 | static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) |
| 96 | { |
a20bb058 | 97 | return phys_to_dma_direct(dev, phys) + size - 1 <= |
b4ebe606 | 98 | min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask); |
95f18391 CH | 99 | } |
| 100 | |
bc3ec75d CH | 101 | void *dma_direct_alloc_pages(struct device *dev, size_t size, |
| 102 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
a8463d4b | 103 | { |
080321d3 CH | 104 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
| 105 | int page_order = get_order(size); |
| 106 | struct page *page = NULL; |
7d21ee4c | 107 | u64 phys_mask; |
c10f07aa | 108 | void *ret; |
a8463d4b | 109 | |
e89f5b37 CH | 110 | /* we always manually zero the memory once we are done: */ |
| 111 | gfp &= ~__GFP_ZERO; |
7d21ee4c CH | 112 | gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, |
| 113 | &phys_mask); |
95f18391 | 114 | again: |
080321d3 | 115 | /* CMA can be used only in the context which permits sleeping */ |
95f18391 | 116 | if (gfpflags_allow_blocking(gfp)) { |
d834c5ab MS | 117 | page = dma_alloc_from_contiguous(dev, count, page_order, |
| 118 | gfp & __GFP_NOWARN); |
95f18391 CH | 119 | if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { |
| 120 | dma_release_from_contiguous(dev, page, count); |
| 121 | page = NULL; |
| 122 | } |
| 123 | } |
080321d3 | 124 | if (!page) |
21f237e4 | 125 | page = alloc_pages_node(dev_to_node(dev), gfp, page_order); |
95f18391 CH | 126 | |
| 127 | if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { |
| 128 | __free_pages(page, page_order); |
| 129 | page = NULL; |
| 130 | |
de7eab30 | 131 | if (IS_ENABLED(CONFIG_ZONE_DMA32) && |
7d21ee4c | 132 | phys_mask < DMA_BIT_MASK(64) && |
de7eab30 TI | 133 | !(gfp & (GFP_DMA32 | GFP_DMA))) { |
| 134 | gfp |= GFP_DMA32; |
| 135 | goto again; |
| 136 | } |
| 137 | |
504a918e | 138 | if (IS_ENABLED(CONFIG_ZONE_DMA) && |
7d21ee4c | 139 | phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) { |
95f18391 CH | 140 | gfp = (gfp & ~GFP_DMA32) | GFP_DMA; |
| 141 | goto again; |
| 142 | } |
| 143 | } |
| 144 | |
080321d3 CH | 145 | if (!page) |
| 146 | return NULL; |
c10f07aa CH | 147 | ret = page_address(page); |
| 148 | if (force_dma_unencrypted()) { |
| 149 | set_memory_decrypted((unsigned long)ret, 1 << page_order); |
| 150 | *dma_handle = __phys_to_dma(dev, page_to_phys(page)); |
| 151 | } else { |
| 152 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
| 153 | } |
| 154 | memset(ret, 0, size); |
| 155 | return ret; |
a8463d4b CB | 156 | } |
| 157 | |
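For context, a minimal hypothetical driver-side sketch of how this allocator is normally reached: when dma_direct_ops serves as the device's dma_map_ops, dma_alloc_coherent() ends up in dma_direct_alloc()/dma_direct_alloc_pages() above.

```c
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet, illustration only: allocate one page of
 * coherent DMA memory, hand the bus address to the device, then free it. */
static int example_coherent_buffer(struct device *dev)
{
	dma_addr_t dma;
	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;

	/* ... program "dma" into the hardware, touch the buffer via "cpu" ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
	return 0;
}
```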
42ed6452 CH | 158 | /* |
| 159 | * NOTE: this function must never look at the dma_addr argument, because we want |
| 160 | * to be able to use it as a helper for iommu implementations as well. |
| 161 | */ |
bc3ec75d | 162 | void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, |
002e6745 | 163 | dma_addr_t dma_addr, unsigned long attrs) |
a8463d4b | 164 | { |
080321d3 | 165 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
c10f07aa | 166 | unsigned int page_order = get_order(size); |
080321d3 | 167 | |
c10f07aa CH | 168 | if (force_dma_unencrypted()) |
| 169 | set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order); |
080321d3 | 170 | if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count)) |
c10f07aa | 171 | free_pages((unsigned long)cpu_addr, page_order); |
a8463d4b CB | 172 | } |
| 173 | |
bc3ec75d CH | 174 | void *dma_direct_alloc(struct device *dev, size_t size, |
| 175 | dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) |
| 176 | { |
| 177 | if (!dev_is_dma_coherent(dev)) |
| 178 | return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); |
| 179 | return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); |
| 180 | } |
| 181 | |
| 182 | void dma_direct_free(struct device *dev, size_t size, |
| 183 | void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) |
| 184 | { |
| 185 | if (!dev_is_dma_coherent(dev)) |
| 186 | arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); |
| 187 | else |
| 188 | dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); |
| 189 | } |
| 190 | |
bc3ec75d CH | 191 | static void dma_direct_sync_single_for_device(struct device *dev, |
| 192 | dma_addr_t addr, size_t size, enum dma_data_direction dir) |
| 193 | { |
| 194 | if (dev_is_dma_coherent(dev)) |
| 195 | return; |
| 196 | arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); |
| 197 | } |
| 198 | |
| 199 | static void dma_direct_sync_sg_for_device(struct device *dev, |
| 200 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) |
| 201 | { |
| 202 | struct scatterlist *sg; |
| 203 | int i; |
| 204 | |
| 205 | if (dev_is_dma_coherent(dev)) |
| 206 | return; |
| 207 | |
| 208 | for_each_sg(sgl, sg, nents, i) |
| 209 | arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); |
| 210 | } |
| 211 | |
| 212 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ |
| 213 | defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) |
| 214 | static void dma_direct_sync_single_for_cpu(struct device *dev, |
| 215 | dma_addr_t addr, size_t size, enum dma_data_direction dir) |
| 216 | { |
| 217 | if (dev_is_dma_coherent(dev)) |
| 218 | return; |
| 219 | arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); |
| 220 | arch_sync_dma_for_cpu_all(dev); |
| 221 | } |
| 222 | |
| 223 | static void dma_direct_sync_sg_for_cpu(struct device *dev, |
| 224 | struct scatterlist *sgl, int nents, enum dma_data_direction dir) |
| 225 | { |
| 226 | struct scatterlist *sg; |
| 227 | int i; |
| 228 | |
| 229 | if (dev_is_dma_coherent(dev)) |
| 230 | return; |
| 231 | |
| 232 | for_each_sg(sgl, sg, nents, i) |
| 233 | arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); |
| 234 | arch_sync_dma_for_cpu_all(dev); |
| 235 | } |
| 236 | |
| 237 | static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, |
| 238 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
| 239 | { |
| 240 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 241 | dma_direct_sync_single_for_cpu(dev, addr, size, dir); |
| 242 | } |
| 243 | |
| 244 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, |
| 245 | int nents, enum dma_data_direction dir, unsigned long attrs) |
| 246 | { |
| 247 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 248 | dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); |
| 249 | } |
| 250 | #endif |
| 251 | |
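A hedged sketch of when a driver reaches these sync hooks: a streaming mapping that the CPU touches between device transfers is handed back and forth with the generic dma_sync_*() helpers, which dispatch to the functions above on non-cache-coherent hardware (hypothetical example, not part of the file).

```c
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet, illustration only: reuse one streaming
 * RX mapping across transfers, transferring ownership back and forth. */
static void example_reuse_rx_buffer(struct device *dev, dma_addr_t dma,
		size_t len)
{
	/* Give the buffer back to the CPU to inspect the received data. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... read the data via the kernel virtual address ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
```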
782e6769 | 252 | dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, |
002e6745 CH | 253 | unsigned long offset, size_t size, enum dma_data_direction dir, |
| 254 | unsigned long attrs) |
a8463d4b | 255 | { |
bc3ec75d CH | 256 | phys_addr_t phys = page_to_phys(page) + offset; |
| 257 | dma_addr_t dma_addr = phys_to_dma(dev, phys); |
27975969 CH | 258 | |
| 259 | if (!check_addr(dev, dma_addr, size, __func__)) |
| 260 | return DIRECT_MAPPING_ERROR; |
bc3ec75d CH | 261 | |
| 262 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 263 | dma_direct_sync_single_for_device(dev, dma_addr, size, dir); |
27975969 | 264 | return dma_addr; |
a8463d4b CB | 265 | } |
| 266 | |
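For orientation, a minimal hypothetical sketch of the driver-facing path that reaches dma_direct_map_page(): dma_map_single() wraps the page/offset computation, and dma_mapping_error() checks the returned handle.

```c
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet, illustration only: map a kernel buffer for
 * a single device-bound transfer, check for a mapping error, then unmap. */
static int example_map_tx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the transfer using "dma" ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
```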
782e6769 CH | 267 | int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, |
| 268 | enum dma_data_direction dir, unsigned long attrs) |
a8463d4b CB | 269 | { |
| 270 | int i; |
| 271 | struct scatterlist *sg; |
| 272 | |
| 273 | for_each_sg(sgl, sg, nents, i) { |
a8463d4b | 274 | BUG_ON(!sg_page(sg)); |
2e86a047 CH | 275 | |
| 276 | sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg)); |
27975969 CH | 277 | if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) |
| 278 | return 0; |
a8463d4b CB | 279 | sg_dma_len(sg) = sg->length; |
| 280 | } |
| 281 | |
bc3ec75d CH | 282 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 283 | dma_direct_sync_sg_for_device(dev, sgl, nents, dir); |
a8463d4b CB | 284 | return nents; |
| 285 | } |
| 286 | |
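A hedged scatter-gather counterpart of the single-buffer example, again hypothetical and not part of the file: drivers build a scatterlist and call dma_map_sg(), which dispatches to dma_direct_map_sg() above.

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver snippet, illustration only: map a two-entry
 * scatterlist for a device-bound transfer. dma_map_sg() returns the
 * number of mapped entries, or 0 on failure, matching dma_direct_map_sg(). */
static int example_map_sg(struct device *dev, struct page *p0, struct page *p1)
{
	struct scatterlist sgl[2];
	int mapped;

	sg_init_table(sgl, 2);
	sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

	mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... program sg_dma_address()/sg_dma_len() of each entry ... */

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}
```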
9d7a224b CH | 287 | /* |
| 288 | * Because 32-bit DMA masks are so common we expect every architecture to be |
| 289 | * able to satisfy them - either by not supporting more physical memory, or by |
| 290 | * providing a ZONE_DMA32. If neither is the case, the architecture needs to |
| 291 | * use an IOMMU instead of the direct mapping. |
| 292 | */ |
1a9777a8 CH | 293 | int dma_direct_supported(struct device *dev, u64 mask) |
| 294 | { |
9d7a224b CH | 295 | u64 min_mask; |
| 296 | |
| 297 | if (IS_ENABLED(CONFIG_ZONE_DMA)) |
| 298 | min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS); |
| 299 | else |
| 300 | min_mask = DMA_BIT_MASK(32); |
| 301 | |
| 302 | min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT); |
| 303 | |
1fc8e642 | 304 | return mask >= phys_to_dma(dev, min_mask); |
1a9777a8 CH | 305 | } |
| 306 | |
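A short worked example of the check above, with illustrative assumptions (ZONE_DMA enabled, no DMA offset, and more than 16 MiB of RAM so min_mask stays at the 24-bit limit); this snippet is not part of the file.

```c
/* Worked example, illustration only: under the assumptions above,
 * min_mask == DMA_BIT_MASK(24) == 0x00ffffff, so any device mask of at
 * least 24 bits is accepted, while a smaller mask is rejected because
 * such a device could not even reach all of ZONE_DMA. */
static bool example_supported_checks(struct device *dev)
{
	return dma_direct_supported(dev, DMA_BIT_MASK(32)) &&	/* true  */
	       dma_direct_supported(dev, DMA_BIT_MASK(24)) &&	/* true  */
	       !dma_direct_supported(dev, DMA_BIT_MASK(20));	/* false */
}
```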
782e6769 | 307 | int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) |
27975969 CH | 308 | { |
| 309 | return dma_addr == DIRECT_MAPPING_ERROR; |
| 310 | } |
| 311 | |
002e6745 CH | 312 | const struct dma_map_ops dma_direct_ops = { |
| 313 | .alloc = dma_direct_alloc, |
| 314 | .free = dma_direct_free, |
| 315 | .map_page = dma_direct_map_page, |
| 316 | .map_sg = dma_direct_map_sg, |
bc3ec75d CH | 317 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) |
| 318 | .sync_single_for_device = dma_direct_sync_single_for_device, |
| 319 | .sync_sg_for_device = dma_direct_sync_sg_for_device, |
| 320 | #endif |
| 321 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ |
| 322 | defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) |
| 323 | .sync_single_for_cpu = dma_direct_sync_single_for_cpu, |
| 324 | .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, |
| 325 | .unmap_page = dma_direct_unmap_page, |
| 326 | .unmap_sg = dma_direct_unmap_sg, |
| 327 | #endif |
a20bb058 | 328 | .get_required_mask = dma_direct_get_required_mask, |
1a9777a8 | 329 | .dma_supported = dma_direct_supported, |
27975969 | 330 | .mapping_error = dma_direct_mapping_error, |
bc3ec75d | 331 | .cache_sync = arch_dma_cache_sync, |
a8463d4b | 332 | }; |
002e6745 | 333 | EXPORT_SYMBOL(dma_direct_ops); |