// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t size;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                        this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent(). Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t gfp)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
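
/*
 * Usage sketch (illustrative, not part of this file): because the buffer is
 * devres-managed, a driver needs no explicit free in its error or remove
 * paths.  "foo_probe" and FOO_BUF_SIZE are hypothetical names used only for
 * this example.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              dma_addr_t dma;
 *              void *buf;
 *
 *              buf = dmam_alloc_coherent(&pdev->dev, FOO_BUF_SIZE, &dma,
 *                                        GFP_KERNEL);
 *              if (!buf)
 *                      return -ENOMEM;
 *
 *              // buf and dma stay valid until the driver is detached
 *              return 0;
 *      }
 */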

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                       gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
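
/*
 * Usage sketch (illustrative only): same devres lifetime as
 * dmam_alloc_coherent(), but with an attribute from the DMA_ATTR_*
 * namespace requested at allocation time.
 *
 *      void *buf;
 *      dma_addr_t dma;
 *
 *      buf = dmam_alloc_attrs(&pdev->dev, size, &dma, GFP_KERNEL,
 *                             DMA_ATTR_WRITE_COMBINE);
 *      if (!buf)
 *              return -ENOMEM;
 */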

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
        dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                 dma_addr_t device_addr, size_t size, int flags)
{
        void *res;
        int rc;

        res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
                                         flags);
        if (!rc)
                devres_add(dev, res);
        else
                devres_free(res);

        return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
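
/*
 * Usage sketch (illustrative, not from this file): declaring a device-local
 * SRAM region so that later dma_alloc_coherent() calls for this device are
 * satisfied from it.  The addresses and size below are made up for the
 * example; here the bus address happens to equal the CPU physical address.
 *
 *      rc = dmam_declare_coherent_memory(&pdev->dev,
 *                                        0x10000000,   // CPU physical address
 *                                        0x10000000,   // address seen by the device
 *                                        SZ_1M, DMA_MEMORY_EXCLUSIVE);
 *      if (rc)
 *              return rc;
 */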

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
        WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page;
        int ret;

        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;

                page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
                                dma_addr));
        } else {
                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!dma_is_direct(ops) && ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                        attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
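
/*
 * Usage sketch (illustrative only): exporting a coherent buffer as a
 * single-entry scatterlist, e.g. from a dma-buf exporter.  Uses the
 * dma_get_sgtable() wrapper, which passes attrs == 0.
 *
 *      struct sg_table sgt;
 *      int rc;
 *
 *      rc = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *      if (rc)
 *              return rc;
 *      // ... hand sgt.sgl to the importer ...
 *      sg_free_table(&sgt);
 */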

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= count || user_count > count - off)
                return -ENXIO;

        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
                pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!dma_is_direct(ops) && ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
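
/*
 * Usage sketch (illustrative, not from this file): a character device driver
 * handing its coherent buffer to user space from its ->mmap file operation.
 * "struct foo_dev" is a hypothetical per-device structure holding the values
 * returned earlier by dma_alloc_attrs().
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *foo = file->private_data;
 *
 *              return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *                                    foo->dma_addr, foo->size, 0);
 *      }
 */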

#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
        u64 mask;

        if (!high_totalram) {
                /* convert to mask just covering totalram */
                low_totalram = (1 << (fls(low_totalram) - 1));
                low_totalram += low_totalram - 1;
                mask = low_totalram;
        } else {
                high_totalram = (1 << (fls(high_totalram) - 1));
                high_totalram += high_totalram - 1;
                mask = (((u64)high_totalram) << 32) + 0xffffffff;
        }
        return mask;
}
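
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): with 8 GiB
 * of RAM, max_pfn is 0x200000, so high_totalram = 0x1fffff >> 20 = 1.
 * Rounding down to a power of two and filling in the low bits keeps it at 1,
 * and the resulting mask is (1ULL << 32) + 0xffffffff = 0x1ffffffff, i.e. a
 * 33-bit mask that just covers the highest RAM address.
 */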

u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);
        return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)       (true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (!arch_dma_alloc_attrs(&dev))
                return NULL;

        if (dma_is_direct(ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_is_direct(ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
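
/*
 * Usage sketch (illustrative only): a plain coherent allocation and release,
 * both in process context.  dma_alloc_coherent()/dma_free_coherent() are the
 * usual wrappers around these calls with attrs == 0.
 *
 *      void *buf;
 *      dma_addr_t dma;
 *
 *      buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *      if (!buf)
 *              return -ENOMEM;
 *      // ... program "dma" into the device, use "buf" from the CPU ...
 *      dma_free_attrs(dev, SZ_4K, buf, dma, 0);
 */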

static inline void dma_check_mask(struct device *dev, u64 mask)
{
        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifndef HAVE_ARCH_DMA_SET_MASK
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#endif

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
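
/*
 * Usage sketch (illustrative, not from this file): a PCI driver negotiating
 * its addressing capability at probe time, falling back from 64-bit to
 * 32-bit DMA.  dma_set_mask_and_coherent() combines the two setters above.
 *
 *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *              rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *              if (rc) {
 *                      dev_err(&pdev->dev, "no usable DMA configuration\n");
 *                      return rc;
 *              }
 *      }
 */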

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));

        if (dma_is_direct(ops))
                arch_dma_cache_sync(dev, vaddr, size, dir);
        else if (ops->cache_sync)
                ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);
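
/*
 * Usage sketch (illustrative only): dma_cache_sync() is meant for memory
 * obtained with DMA_ATTR_NON_CONSISTENT, where the driver performs the
 * coherency maintenance itself.
 *
 *      buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *                            DMA_ATTR_NON_CONSISTENT);
 *      if (!buf)
 *              return -ENOMEM;
 *      // CPU fills "buf", then flushes it before the device reads it
 *      dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
 */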