// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

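/*
 * Illustrative sketch (not part of this file): a driver allocating a
 * device-lifetime buffer with the managed API from its probe routine.
 * The foo_dev structure, foo_probe() and FOO_BUF_SIZE are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *foo = platform_get_drvdata(pdev);
 *
 *		foo->buf = dmam_alloc_attrs(&pdev->dev, FOO_BUF_SIZE,
 *					    &foo->buf_dma, GFP_KERNEL, 0);
 *		if (!foo->buf)
 *			return -ENOMEM;
 *		// No explicit free needed: devres releases it on detach.
 *		return 0;
 *	}
 */
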
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

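/*
 * Illustrative sketch (not part of this file): mapping a driver-owned
 * buffer for a single transfer via dma_map_single(), which ends up in
 * dma_map_page_attrs() above.  Always check the result with
 * dma_mapping_error() before handing the address to hardware.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
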
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0)
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
			      ents != -EIO && ents != -EREMOTEIO))
		return -EIO;

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:   The device for which to perform the DMA operation
 * @sg:    The scatterlist describing the buffer
 * @nents: Number of entries to map
 * @dir:   DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

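/*
 * Illustrative sketch (not part of this file): mapping a scatterlist and
 * programming each mapped segment into a device, here via a hypothetical
 * foo_hw_fill_desc() helper.  Note that the unmap uses the original
 * nents, not the mapped count.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		foo_hw_fill_desc(hw, sg_dma_address(s), sg_dma_len(s));
 *	// ... later ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
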
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:   The device for which to perform the DMA operation
 * @sgt:   The sg_table object describing the buffer
 * @dir:   DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *		in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		    enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);

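/*
 * Illustrative sketch (not part of this file): the sg_table variant keeps
 * the mapped count inside the table, so callers only check the error code
 * and then walk the mapped entries with for_each_sgtable_dma_sg().  The
 * foo_hw_add_segment() helper is hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, s, i)
 *		foo_hw_add_segment(hw, sg_dma_address(s), sg_dma_len(s));
 *	// ... later ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */
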
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

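/*
 * Illustrative sketch (not part of this file): a long-lived streaming
 * mapping reused across transfers.  Ownership must bounce between device
 * and CPU around each CPU access; foo_parse_rx() is hypothetical.
 *
 *	// buffer was mapped once with dma_map_single(..., DMA_FROM_DEVICE)
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	foo_parse_rx(buf, len);		// CPU may touch the data now
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... hand the buffer back to the hardware ...
 */
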
/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

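/*
 * Illustrative sketch (not part of this file): exposing a coherent buffer
 * to user space from a hypothetical character device's ->mmap() handler,
 * after having checked dma_can_mmap() at setup time.  The foo_dev fields
 * and FOO_BUF_SIZE are assumptions.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_attrs(foo->dev, vma, foo->buf,
 *				      foo->buf_dma, FOO_BUF_SIZE, 0);
 *	}
 */
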
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

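/*
 * Illustrative sketch (not part of this file): a typical coherent
 * allocation for a descriptor ring via the dma_alloc_coherent() wrapper,
 * which calls dma_alloc_attrs() with attrs == 0.  ring_size is a
 * hypothetical driver-specific value.
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// CPU and device see updates without explicit sync calls.
 *	// ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */
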
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

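/*
 * Illustrative sketch (not part of this file): dma_alloc_pages() returns
 * non-coherent pages that are already mapped for the device, so the
 * streaming sync calls are required around CPU access, using the
 * dma_handle obtained from the allocation.
 *
 *	page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	dma_sync_single_for_cpu(dev, dma, size, DMA_BIDIRECTIONAL);
 *	memset(page_address(page), 0, size);
 *	dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
 */
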
static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

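/*
 * Illustrative sketch (not part of this file): allocating a large,
 * possibly non-contiguous buffer and obtaining a contiguous kernel
 * mapping for it, a pattern used for things like capture frame buffers.
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
 *		return -ENOMEM;
 *	}
 *	// ... vaddr for CPU access, sgt for device programming ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
 */
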
static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

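/*
 * Illustrative sketch (not part of this file): drivers usually set both
 * masks at once via the dma_set_mask_and_coherent() wrapper from their
 * probe routine, traditionally falling back to 32-bit addressing when the
 * wider mask is rejected.  pdev here is the hypothetical probed device.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
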
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

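/*
 * Illustrative sketch (not part of this file): fast paths that recycle
 * mappings (buffer-recycling pools such as the XDP socket buffer pool)
 * can query dma_need_sync() once per mapping and skip the per-packet
 * sync calls entirely when they would be no-ops.  The pool structure and
 * its dma_needs_sync field are hypothetical.
 *
 *	pool->dma_needs_sync = dma_need_sync(dev, dma);
 *	// ... per packet ...
 *	if (pool->dma_needs_sync)
 *		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 */
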
unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);