// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

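/*
 * Worked example (illustrative, not part of the original source): on a
 * machine with 4 GiB of RAM and no bus offset, the highest page maps to
 * max_dma = 0xfffff000, fls64() returns 32, and the result is
 * (1ULL << 31) * 2 - 1 == DMA_BIT_MASK(32); i.e. the required mask is
 * rounded up to cover the most significant populated address bit.
 */
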
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

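/*
 * Example (illustrative): a device limited to a 24-bit mask gets GFP_DMA,
 * a 32-bit mask gets GFP_DMA32, and a full 64-bit mask gets 0 so any zone
 * may satisfy the allocation. The returned flags are only a first guess;
 * dma_coherent_ok() below decides whether the result is actually usable.
 */
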
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

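/*
 * Example (illustrative): for a device with a 32-bit coherent_dma_mask and
 * no bus offset, a 4 KiB buffer at phys 0x1'0000'0000 translates to a DMA
 * address above DMA_BIT_MASK(32) and is rejected, which triggers the
 * lower-zone retry logic in __dma_direct_alloc_pages() below.
 */
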
/*
 * Decrypting memory is allowed to block, so if this device requires
 * unencrypted memory it must come from atomic pools.
 */
static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
		unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return false;
	if (gfpflags_allow_blocking(gfp))
		return false;
	if (force_dma_unencrypted(dev))
		return true;
	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return false;
	if (dma_alloc_need_uncached(dev, attrs))
		return true;
	return false;
}

static inline bool dma_should_free_from_pool(struct device *dev,
		unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return true;
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev))
		return false;
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return true;
	return false;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

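/*
 * Illustrative walk-through (not part of the original source): the
 * allocator above tries the per-device or global CMA area first (if any),
 * then the page allocator in the optimistic zone. If dma_coherent_ok()
 * rejects the result, e.g. a 30-bit coherent mask on a system with a bus
 * offset, it retries with GFP_DMA32 and finally GFP_DMA before giving up.
 */
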
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
		u64 phys_mask;

		gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
				&phys_mask);
		page = dma_alloc_from_pool(dev, size, &ret, gfp,
				dma_coherent_ok);
		if (!page)
			return NULL;
		goto done;
	}

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

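/*
 * Caller-side sketch (illustrative, not part of the original source):
 * drivers reach this routine through the generic wrapper rather than
 * calling it directly, e.g.:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 * dma_alloc_coherent() dispatches here when the device uses the direct
 * mapping (no IOMMU and no custom dma_map_ops).
 */
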
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (dma_should_free_from_pool(dev, attrs) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (dma_should_alloc_from_pool(dev, gfp, 0)) {
		page = dma_alloc_from_pool(dev, size, &ret, gfp,
				dma_coherent_ok);
		if (!page)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		if (set_memory_decrypted((unsigned long)ret,
				1 << get_order(size)))
			goto out_free_pages;
	}
	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	unsigned int page_order = get_order(size);
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (dma_should_free_from_pool(dev, 0) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

	dma_free_contiguous(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

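/*
 * Caller-side sketch (illustrative, not part of the original source):
 * dma_map_sg() dispatches here for direct-mapped devices:
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *
 *	if (!nents)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 *
 * A return value of 0 signals failure; any partially mapped entries have
 * already been undone via the out_unmap path above.
 */
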
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

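/*
 * Example (illustrative): with CONFIG_ZONE_DMA and the default
 * zone_dma_bits of 24, a sub-32-bit mask is accepted only if it covers
 * the bus address of the 16 MiB ZONE_DMA boundary, i.e. only when the
 * device can still reach all of ZONE_DMA after any bus offset applies.
 */
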
size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_direct_set_offset);