```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

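/*
 * Translate a CPU physical address to a DMA address, using the unencrypted
 * address translation for devices that must use unencrypted DMA.
 */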
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

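/* Look up the struct page backing a direct-mapped DMA address. */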
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

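/*
 * Report the smallest mask (a power of two minus one) that covers the DMA
 * address of the highest page of memory.
 */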
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
		dev->coherent_dma_mask,
		dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

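/*
 * Check that a physical range maps to a DMA address that fits within both
 * the device's coherent DMA mask and its bus DMA limit.
 */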
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

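/*
 * Flip the kernel mapping of a buffer between encrypted and decrypted for
 * devices that require unencrypted DMA; both helpers are no-ops otherwise.
 */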
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

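/*
 * Allocate pages the device can address: use the device's swiotlb pool when
 * one is reserved for allocations, otherwise try dma_alloc_contiguous() and
 * then the page allocator, retrying in progressively lower zones (GFP_DMA32,
 * then GFP_DMA) whenever the returned memory is not actually addressable.
 */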
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or to remap it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform.\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, so allocate the memory
	 * from the atomic pools instead if we aren't allowed to block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup. These need to
	 * be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

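/*
 * Free memory obtained from dma_direct_alloc(); the checks below mirror the
 * allocation paths above (opaque page cookie, arch allocator, global pool,
 * atomic pool, and finally remapped or directly mapped kernel memory).
 */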
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					sg_dma_len(sg), dir, attrs);
	}
}
#endif

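/*
 * Map a scatterlist for DMA. PCI P2PDMA segments that can be reached
 * directly over the bus keep their bus address; all other segments are
 * mapped with dma_direct_map_page(). Returns the number of mapped entries
 * or a negative errno on failure.
 */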
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

/*
 * Check whether all RAM resource ranges are covered by the dma_range_map.
 * Returns 0 when further checking is needed, or 1 if some RAM range can't
 * be covered by the dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
		unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;
				break;
			}
		}
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

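/*
 * Syncs are needed when the device is not cache-coherent, or when the DMA
 * address resolves to a swiotlb bounce buffer that has to be copied.
 */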
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
```