// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

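/*
 * Illustrative sketch (not part of this file): an architecture whose
 * ZONE_DMA does not end at 16 MiB would override the default from its
 * early init code, before any dma-direct allocations happen.  The hook
 * name below is hypothetical; on a real architecture this would live in
 * its zone/memblock setup:
 *
 *      void __init example_arch_dma_init(void)
 *      {
 *              // ZONE_DMA covers the low 1 GiB on this imaginary platform
 *              zone_dma_bits = 30;
 *      }
 */
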
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

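/*
 * Worked example (illustrative, with made-up numbers): if the highest
 * populated page translates to max_dma = 0x23fffffff, then
 * fls64(max_dma) = 34 and the expression above yields
 * (1ULL << 33) * 2 - 1 = 0x3ffffffff, i.e. a 34-bit required mask that
 * still covers the top of memory.
 */
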
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first. If that returns memory that isn't actually addressable
         * we will fallback to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
         * zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

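/*
 * Illustrative examples (assuming the default zone_dma_bits = 24, no bus
 * limit, and no DMA offset): a device with a 24-bit coherent mask gets
 * GFP_DMA, a device with a 30-bit mask gets GFP_DMA32, and a device with
 * a full 64-bit mask gets no zone modifier and can be served from any
 * zone.
 */
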
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

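/*
 * Illustrative walk-through of the retry ladder above (commentary, not
 * additional logic): CMA/contiguous memory is tried first, then the page
 * allocator in the optimistic zone; if the resulting pages are not
 * addressable by the device, the allocation is retried with GFP_DMA32
 * and finally with GFP_DMA.  If even ZONE_DMA memory is not addressable,
 * NULL is returned.
 */
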
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        u64 phys_mask;
        void *ret;

        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
        page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
        if (!page)
                return NULL;
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;
}

void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        void *ret;
        int err;

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
                if (!page)
                        return NULL;
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
                *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
                /* return the page pointer as the opaque cookie */
                return page;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

        /*
         * Remapping or decrypting memory may block. If either is required and
         * we can't block, allocate the memory from the atomic pools.
         */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            !gfpflags_allow_blocking(gfp) &&
            (force_dma_unencrypted(dev) ||
             (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
        if (!page)
                return NULL;

        if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             !dev_is_dma_coherent(dev)) ||
            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
                if (force_dma_unencrypted(dev)) {
                        err = set_memory_decrypted((unsigned long)ret,
                                        1 << get_order(size));
                        if (err)
                                goto out_free_pages;
                }
                memset(ret, 0, size);
                goto done;
        }

        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                err = set_memory_decrypted((unsigned long)ret,
                                1 << get_order(size));
                if (err)
                        goto out_free_pages;
        }

        memset(ret, 0, size);

        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !dev_is_dma_coherent(dev)) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }
done:
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (force_dma_unencrypted(dev)) {
                err = set_memory_encrypted((unsigned long)page_address(page),
                                1 << get_order(size));
                /* If memory cannot be re-encrypted, it must be leaked */
                if (err)
                        return NULL;
        }
out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
}

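/*
 * Driver-side view (illustrative sketch, not part of this file): a device
 * that is not behind an IOMMU reaches dma_direct_alloc() through the
 * generic dma_alloc_coherent() wrapper.  "my_dev" and "vaddr" below are
 * hypothetical names:
 *
 *      dma_addr_t dma_handle;
 *      void *vaddr = dma_alloc_coherent(my_dev, SZ_4K, &dma_handle,
 *                                       GFP_KERNEL);
 *      if (!vaddr)
 *              return -ENOMEM;
 *      // ... program the device with dma_handle, use vaddr from the CPU ...
 *      dma_free_coherent(my_dev, SZ_4K, vaddr, dma_handle);
 */
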
void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !dev_is_dma_coherent(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
                vunmap(cpu_addr);
        else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                arch_dma_clear_uncached(cpu_addr, size);

        dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        page = __dma_direct_alloc_pages(dev, size, gfp);
        if (!page)
                return NULL;
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                goto out_free_pages;
        }

        ret = page_address(page);
        if (force_dma_unencrypted(dev)) {
                if (set_memory_decrypted((unsigned long)ret,
                                1 << get_order(size)))
                        goto out_free_pages;
        }
        memset(ret, 0, size);
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        unsigned int page_order = get_order(size);
        void *vaddr = page_address(page);

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

        dma_free_contiguous(dev, page, size);
}

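/*
 * Driver-side view (illustrative sketch): the two helpers above back the
 * dma_alloc_pages() / dma_free_pages() interface, which hands out struct
 * page backed memory addressable by the device; on non-coherent platforms
 * the caller is responsible for the usual sync calls.  "my_dev" is a
 * hypothetical device pointer:
 *
 *      dma_addr_t dma_handle;
 *      struct page *page = dma_alloc_pages(my_dev, SZ_64K, &dma_handle,
 *                                          DMA_TO_DEVICE, GFP_KERNEL);
 *      if (!page)
 *              return -ENOMEM;
 *      // ... fill page_address(page), sync, let the device read ...
 *      dma_free_pages(my_dev, SZ_64K, page, dma_handle, DMA_TO_DEVICE);
 */
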
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
                                        dir);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
                                        dir);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                                attrs);
}
#endif

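/*
 * Driver-side view (illustrative sketch): after mapping a scatterlist for
 * streaming DMA, drivers call the generic sync helpers, which land in the
 * routines above when dma-direct is in use.  "my_dev", "sgl" and "nents"
 * are hypothetical, for a buffer mapped with DMA_FROM_DEVICE:
 *
 *      // ... let the device write into the buffers ...
 *      dma_sync_sg_for_cpu(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *      // ... the CPU may now read the data; hand it back before reuse:
 *      dma_sync_sg_for_device(my_dev, sgl, nents, DMA_FROM_DEVICE);
 */
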
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}

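/*
 * Driver-side view (illustrative sketch): dma_map_sg() routes here for
 * direct-mapped devices and returns the number of entries mapped, or 0 on
 * failure.  "my_dev", "sgl" and "nents" are hypothetical:
 *
 *      int mapped = dma_map_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 *      if (!mapped)
 *              return -ENOMEM;
 *      // ... program the device with sg_dma_address()/sg_dma_len() ...
 *      dma_unmap_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 */
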
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}

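/*
 * Driver-side view (illustrative sketch): a character device exposing a
 * coherent buffer to userspace ends up here via dma_mmap_coherent().
 * "example_state"/"st" is a hypothetical per-device structure holding the
 * allocation:
 *
 *      static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct example_state *st = file->private_data;
 *
 *              return dma_mmap_coherent(st->dev, vma, st->vaddr,
 *                                       st->dma_handle, st->size);
 *      }
 */
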
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32. If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

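/*
 * Driver-side view (illustrative sketch): this is the check behind
 * dma_set_mask() and friends for direct-mapped devices.  A typical probe
 * path simply requests the widest mask the hardware supports and checks
 * the result; "my_dev" is hypothetical:
 *
 *      if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64)))
 *              return -EIO;
 */
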
size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active() &&
            (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
                is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].offset = offset;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}
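
/*
 * Platform-side view (illustrative sketch): per the note above, this must
 * only be called from architecture or platform setup code that knows the
 * bus offset but has no firmware description of it.  The values below are
 * hypothetical: RAM visible to the CPU at 0x40000000 appears to devices at
 * bus address 0x00000000 for the first 1 GiB:
 *
 *      ret = dma_direct_set_offset(dev, 0x40000000, 0x00000000, SZ_1G);
 *      if (ret)
 *              dev_warn(dev, "failed to set DMA offset\n");
 */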