// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(1);
}

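/*
 * Return the device-visible address for a CPU physical address.  When the
 * buffer has to be mapped unencrypted, use __phys_to_dma() so that the
 * memory encryption bit is not set in the returned address.
 */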
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

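/*
 * Map a coherent DMA mask to the GFP zone modifier to try first (GFP_DMA,
 * GFP_DMA32 or none), and report the corresponding physical address limit
 * back through @phys_mask.
 */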
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted(dev))
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

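/*
 * Allocate pages for a coherent buffer: try the contiguous (CMA) allocator
 * first, fall back to the page allocator, and retry in ever more restrictive
 * zones until the pages are addressable under the device's coherent and bus
 * DMA masks.
 */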
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

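/*
 * Allocate and set up a coherent mapping: handle the no-kernel-mapping
 * cookie case, reject highmem pages that cannot be mapped here, clear the
 * memory encryption bit where required, zero the buffer and, on
 * architectures with an uncached segment, return an uncached alias.
 */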
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		dma_free_contiguous(dev, page, size);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = uncached_kernel_address(ret);
	}

	return ret;
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		cpu_addr = cached_kernel_address(cpu_addr);
	dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
}

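/*
 * dma_direct_alloc/free dispatch to the arch_dma_{alloc,free} overrides
 * when an uncached mapping is needed but no uncached segment is available;
 * otherwise they use the generic page-based implementation above.
 */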
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

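/*
 * The "for device" sync operations hand ownership of a buffer to the
 * device: copy data to the swiotlb bounce buffer where one backs the
 * mapping, then perform the cache maintenance a non-coherent device needs.
 */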
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(dev, paddr, sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

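/*
 * The "for cpu" sync operations hand ownership back to the CPU: invalidate
 * stale CPU cache lines on non-coherent platforms, then copy any data out
 * of a swiotlb bounce buffer that backs the mapping.
 */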
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
		arch_sync_dma_for_cpu_all(dev);
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all(dev);
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);

void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif

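/*
 * A mapping can use the direct translation only if the resulting DMA
 * address is within the device's addressing capability and swiotlb
 * bouncing has not been forced on the kernel command line.
 */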
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}

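/*
 * Map a single page for streaming DMA.  If the page is not directly
 * addressable by the device, try to bounce it through swiotlb; on failure
 * report the offending address and return DMA_MAPPING_ERROR.
 */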
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

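/*
 * Describe a coherent allocation as a single-entry scatterlist so that it
 * can be handed to interfaces that expect an sg_table.
 */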
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

#ifdef CONFIG_MMU
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
#else /* CONFIG_MMU */
bool dma_direct_can_mmap(struct device *dev)
{
	return false;
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
#endif /* CONFIG_MMU */

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}