// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma.h>

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

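/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * dmam_alloc_attrs() from a hypothetical driver's probe routine.  The devres
 * machinery above frees the buffer automatically on driver detach, so the
 * error paths need no explicit dmam_free_coherent().  Device, size and the
 * function name are assumptions for the example only.
 */
static int dma_example_probe(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* one page of descriptor memory, released automatically by devres */
	ring = dmam_alloc_attrs(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL, 0);
	if (!ring)
		return -ENOMEM;

	/* program ring_dma into the (hypothetical) hardware here */
	return 0;
}
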
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (use_dma_iommu(dev))
		return false;

	if (likely(!ops))
		return true;

#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
			   attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_page(dev, addr, size, dir, attrs);
	else
		ops->unmap_page(dev, addr, size, dir, attrs);
	trace_dma_unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

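/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically consume the streaming API built on dma_map_page_attrs() above,
 * here through the dma_map_single()/dma_unmap_single() wrappers from
 * <linux/dma-mapping.h>.  The buffer, length and function name are
 * hypothetical.
 */
static int dma_example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... tell the (hypothetical) hardware to read from 'dma' ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
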
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		    enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);

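/*
 * Illustrative sketch (not part of the original file): mapping a
 * scatter-gather buffer with dma_map_sgtable() and honouring the error codes
 * documented above.  The sg_table is assumed to have been filled in by the
 * caller (e.g. with sg_alloc_table_from_pages()); the function name is
 * hypothetical.
 */
static int dma_example_map_table(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret == -ENOMEM)
		return ret;	/* temporary shortage, the caller may retry */
	if (ret)
		return ret;	/* -EINVAL/-EIO/-EREMOTEIO: give up */

	/* ... program the (hypothetical) hardware using sgt->sgl ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}
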
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		; /* nothing to do: uncached and no swiotlb */
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
	else if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	trace_dma_unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

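/*
 * Illustrative sketch (not part of the original file): dma_map_resource() is
 * intended for MMIO ranges, e.g. letting a DMA engine write into another
 * device's FIFO registers.  The physical address, the PAGE_SIZE window and
 * the function name are assumptions for the example only.
 */
static dma_addr_t dma_example_map_fifo(struct device *dma_engine,
				       phys_addr_t fifo_phys)
{
	dma_addr_t dma;

	dma = dma_map_resource(dma_engine, fifo_phys, PAGE_SIZE,
			       DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_engine, dma))
		return DMA_MAPPING_ERROR;

	/* hand 'dma' to the (hypothetical) DMA engine descriptor */
	return dma;
}
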
#ifdef CONFIG_DMA_NEED_SYNC
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);

void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	trace_dma_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);

void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);

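/*
 * Illustrative sketch (not part of the original file): a driver that keeps a
 * streaming mapping alive across transfers must pass buffer ownership back
 * and forth with the sync helpers wrapped around the functions above.  The
 * device, mapping, buffer and function name are hypothetical.
 */
static void dma_example_reuse_rx(struct device *dev, dma_addr_t dma,
				 void *cpu_buf, size_t len)
{
	/* give the buffer to the CPU to inspect the received data */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... read cpu_buf here ... */

	/* hand ownership back to the device for the next transfer */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
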
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/*
		 * dma_skip_sync could've been reset on the first SWIOTLB
		 * buffer mapping, but @dma_addr is not necessarily a SWIOTLB
		 * buffer.  In this case, fall back to the more granular check.
		 */
		return dma_direct_need_sync(dev, dma_addr);
	return true;
}
EXPORT_SYMBOL_GPL(__dma_need_sync);

/**
 * dma_need_unmap - does this device need dma_unmap_* operations
 * @dev: device to check
 *
 * If this function returns %false, drivers can skip calling dma_unmap_* after
 * finishing an I/O.  This function must be called after all mappings that might
 * need to be unmapped have been performed.
 */
bool dma_need_unmap(struct device *dev)
{
	if (!dma_map_direct(dev, get_dma_ops(dev)))
		return true;
	if (!dev->dma_skip_sync)
		return true;
	return IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
EXPORT_SYMBOL_GPL(dma_need_unmap);

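/*
 * Illustrative sketch (not part of the original file): a hot-path user can
 * cache the result of dma_need_sync() once per mapping to avoid re-checking
 * on every completion.  The structure, fields and function names below are
 * hypothetical.
 */
struct dma_example_rx_buf {
	dma_addr_t	dma;
	size_t		len;
	bool		needs_sync;
};

static void dma_example_init_rx_buf(struct device *dev,
				    struct dma_example_rx_buf *buf)
{
	buf->needs_sync = dma_need_sync(dev, buf->dma);
}

static void dma_example_complete_rx(struct device *dev,
				    struct dma_example_rx_buf *buf)
{
	if (buf->needs_sync)
		dma_sync_single_for_cpu(dev, buf->dma, buf->len,
					DMA_FROM_DEVICE);
}
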
static void dma_setup_need_sync(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
		/*
		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
		 * mapping, if any. During the device initialization, it's
		 * enough to check only for the DMA coherence.
		 */
		dev->dma_skip_sync = dev_is_dma_coherent(dev);
	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
		/*
		 * Synchronization is not possible when none of DMA sync ops
		 * is set.
		 */
		dev->dma_skip_sync = true;
	else
		dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
#endif /* !CONFIG_DMA_NEED_SYNC */

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	if (use_dma_iommu(dev))
		return true;
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
				      attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

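/*
 * Illustrative sketch (not part of the original file): how a driver might
 * expose a coherent buffer to user space from its mmap file operation, gated
 * on dma_can_mmap().  The dma_example_dev structure, its fields and the
 * helper name are hypothetical.
 */
struct dma_example_dev {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_addr;
	size_t		size;
};

static int dma_example_mmap(struct dma_example_dev *ed,
			    struct vm_area_struct *vma)
{
	if (!dma_can_mmap(ed->dev))
		return -ENXIO;
	return dma_mmap_attrs(ed->dev, vma, ed->cpu_addr, ed->dma_addr,
			      ed->size, 0);
}
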
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);

	if (use_dma_iommu(dev))
		return DMA_BIT_MASK(32);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
				DMA_BIDIRECTIONAL, flag, attrs);
		return cpu_addr;
	}

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops)) {
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	} else if (use_dma_iommu(dev)) {
		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	} else {
		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
				attrs);
		return NULL;
	}

	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
			flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
		       attrs);
	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

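/*
 * Illustrative sketch (not part of the original file): the classic pattern
 * for a long-lived coherent descriptor ring, built on dma_alloc_attrs()
 * through the dma_alloc_coherent() wrapper.  The helper names and sizes are
 * hypothetical.
 */
static void *dma_example_alloc_ring(struct device *dev, size_t ring_bytes,
				    dma_addr_t *ring_dma)
{
	/* coherent memory: no dma_sync_* calls are needed around CPU access */
	return dma_alloc_coherent(dev, ring_bytes, ring_dma, GFP_KERNEL);
}

static void dma_example_free_ring(struct device *dev, size_t ring_bytes,
				  void *ring, dma_addr_t ring_dma)
{
	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
}
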
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (use_dma_iommu(dev))
		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages_op)
		return NULL;
	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page) {
		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
				      size, dir, gfp, 0);
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	} else {
		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
	}
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (use_dma_iommu(dev))
		dma_common_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

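/*
 * Illustrative sketch (not part of the original file): dma_alloc_pages()
 * returns non-coherent memory with a permanent kernel mapping, so the caller
 * owns the cache maintenance via dma_sync_single_*().  Device, size and the
 * helper names are hypothetical.
 */
static struct page *dma_example_alloc_rx_area(struct device *dev,
					      dma_addr_t *dma)
{
	return dma_alloc_pages(dev, PAGE_SIZE, dma, DMA_FROM_DEVICE,
			       GFP_KERNEL);
}

static void dma_example_rx_done(struct device *dev, dma_addr_t dma,
				struct page *page)
{
	/* the device has written into the page; claim it for the CPU */
	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* ... read the data through page_address(page) here ... */
}
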
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (use_dma_iommu(dev))
		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	} else {
		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	trace_dma_free_sgt(dev, sgt, size, dir);
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	if (use_dma_iommu(dev))
		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_vmap_noncontiguous(dev, size, sgt);

	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	if (use_dma_iommu(dev))
		iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

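/*
 * Illustrative sketch (not part of the original file): the intended usage of
 * the noncontiguous API - allocate, take a kernel mapping while the CPU needs
 * one, then tear everything down in reverse order.  Device, size and the
 * function name are hypothetical.
 */
static int dma_example_noncontig(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* ... CPU access through vaddr, device access through sgt ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
	return 0;
}
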
static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev)) {
		if (WARN_ON(ops))
			return false;
		return true;
	}

	/*
	 * ->dma_supported sets and clears the bypass flag, so ignore it here
	 * and always call into the method if there is one.
	 */
	if (ops) {
		if (!ops->dma_supported)
			return true;
		return ops->dma_supported(dev, mask);
	}

	return dma_direct_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
	return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	dma_setup_need_sync(dev);

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

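/*
 * Illustrative sketch (not part of the original file): drivers normally set
 * both masks at once via the dma_set_mask_and_coherent() wrapper from
 * <linux/dma-mapping.h>, falling back to a narrower mask when the wider one
 * is rejected.  The function name is hypothetical.
 */
static int dma_example_set_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	/* fall back for hardware or buses limited to 32-bit addressing */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
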
static bool __dma_addressing_limited(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			 dma_get_required_mask(dev))
		return true;

	if (unlikely(ops) || use_dma_iommu(dev))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
	if (!__dma_addressing_limited(dev))
		return false;

	dev_dbg(dev, "device is DMA addressing limited\n");
	return true;
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (use_dma_iommu(dev))
		size = iommu_dma_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (use_dma_iommu(dev))
		size = iommu_dma_opt_mapping_size();
	else if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

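/*
 * Illustrative sketch (not part of the original file): block and network
 * drivers typically clamp their per-segment limits with dma_opt_mapping_size()
 * so mappings stay on the fast path of the IOMMU or SWIOTLB.  The queue-limit
 * structure and helper name below are hypothetical.
 */
struct dma_example_queue_limits {
	size_t	max_segment_size;
};

static void dma_example_init_limits(struct device *dev,
				    struct dma_example_queue_limits *lim)
{
	/* already capped by dma_max_mapping_size() inside the helper */
	lim->max_segment_size = dma_opt_mapping_size(dev);
}
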
unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev))
		return iommu_dma_get_merge_boundary(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);