/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>
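
/*
 * Small pre-mapped, non-cacheable pool used to satisfy atomic (non-blocking)
 * coherent allocations, where remapping pages is not possible. Its size can
 * be overridden on the command line via "coherent_pool=<size>".
 */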
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
	atomic_pool_size = memparse(p, &p);
early_param("coherent_pool", early_coherent_pool);
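
/*
 * Hand out a buffer from the atomic pool: returns the non-cacheable virtual
 * address and, via @ret_page, the backing page, or NULL if the pool was
 * never initialised or is exhausted.
 */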
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
		WARN(1, "coherent pool not initialised!\n");

	val = gen_pool_alloc(atomic_pool, size);
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);

static bool __in_atomic_pool(void *start, size_t size)
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);

static int __free_from_pool(void *start, size_t size)
	if (!__in_atomic_pool(start, size))

	gen_pool_free(atomic_pool, (unsigned long)start, size);
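
/*
 * Allocate a coherent buffer for a non-coherent device: blocking callers get
 * pages from dma_direct_alloc_pages(), have the kernel alias cleaned from the
 * caches and receive a writecombine remapping; atomic callers are served
 * straight from the pre-remapped atomic pool.
 */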
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
	void *ptr, *coherent_ptr;
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		*dma_handle = phys_to_dma(dev, page_to_phys(page));

	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
			prot, __builtin_return_address(0));

	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
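
/* Tear down what arch_dma_alloc() set up: pool memory or remapped pages. */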
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
	if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
		void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));

		dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));

pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
	__dma_map_area(phys_to_virt(paddr), size, dir);

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
	__dma_unmap_area(phys_to_virt(paddr), size, dir);
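
/* Helpers shared by the mmap and get_sgtable implementations further down. */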
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
		struct page *page, size_t size)
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
		unsigned long pfn, size_t size)
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				vma->vm_end - vma->vm_start,
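
/*
 * Set up the atomic pool at boot: take pages from CMA (or the DMA32 zone),
 * zero and flush them, remap them as Normal-NC and hand them to a gen_pool
 * for __alloc_from_pool() to dole out later.
 */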
static int __init atomic_pool_init(void)
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
				pool_size_order, false);
		page = alloc_pages(GFP_DMA32, pool_size_order);

		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
				VM_USERMAP, prot, atomic_pool_init);
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				atomic_pool_size, -1);

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);

	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
	gen_pool_destroy(atomic_pool);
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);

	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/
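
/*
 * Every operation below fails or does nothing. These ops serve as the
 * default, so a device that never had real DMA ops installed (for instance
 * one whose firmware description says it cannot perform DMA) does not
 * silently carry out DMA.
 */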
static void *__dummy_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,

static void __dummy_free(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle,

static int __dummy_mmap(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir,

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir,

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,

static void __dummy_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir,

static void __dummy_sync_single(struct device *dev,
		dma_addr_t dev_addr, size_t size,
		enum dma_data_direction dir)

static void __dummy_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)

static int __dummy_dma_supported(struct device *hwdev, u64 mask)

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
	WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
			TAINT_CPU_OUT_OF_SPEC,
			"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
			ARCH_DMA_MINALIGN, cache_line_size());

	return atomic_pool_init();
arch_initcall(arm64_dma_init);
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
	__dma_flush_area(virt, PAGE_SIZE);
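
/*
 * IOMMU-backed coherent allocation. Depending on context this uses one of
 * three strategies: a physically contiguous buffer (atomic pool or
 * alloc_pages()) for non-blocking callers, a CMA buffer for
 * DMA_ATTR_FORCE_CONTIGUOUS, or an iommu_dma_alloc() page array remapped
 * into vmalloc space for everything else.
 */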
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp,
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */

	if (!gfpflags_allow_blocking(gfp)) {
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
			addr = __alloc_from_pool(size, &page, gfp);

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
				__free_pages(page, get_order(size));
				__free_from_pool(addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
				get_order(size), gfp & __GFP_NOWARN);

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				__builtin_return_address(0));
			memset(addr, 0, size);
				__dma_flush_area(page_to_virt(page), iosize);
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
				__builtin_return_address(0));
			iommu_dma_free(dev, pages, iosize, handle);
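
/*
 * Free a buffer obtained from __iommu_alloc_attrs(); the comment below
 * explains the four forms @cpu_addr can take.
 */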
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
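
/* Map a coherent buffer into userspace, honouring DMA_ATTR_FORCE_CONTIGUOUS. */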
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
	struct vm_struct *area;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))

	return iommu_dma_mmap(area->pages, size, vma);

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);

	if (WARN_ON(!area || !area->pages))

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
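
/*
 * Cache maintenance for streaming DMA on non-coherent devices: walk the IOVA
 * back to a physical address through the IOMMU, then do the usual arch sync.
 */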
static void __iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t dev_addr, size_t size,
		enum dma_data_direction dir)
	if (dev_is_dma_coherent(dev))

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_cpu(dev, phys, size, dir);

static void __iommu_sync_single_for_device(struct device *dev,
		dma_addr_t dev_addr, size_t size,
		enum dma_data_direction dir)
	if (dev_is_dma_coherent(dev))

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_device(dev, phys, size, dir);
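
/*
 * map_page/unmap_page: set up or tear down the IOVA mapping, adding the cache
 * maintenance that non-coherent devices need (unless the caller asked to skip
 * it with DMA_ATTR_SKIP_CPU_SYNC).
 */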
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir,
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    !iommu_dma_mapping_error(dev, dev_addr))
		__dma_map_area(page_address(page) + offset, size, dir);

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir,
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);

static void __iommu_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
	struct scatterlist *sg;

	if (dev_is_dma_coherent(dev))

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);

static void __iommu_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
	struct scatterlist *sg;

	if (dev_is_dma_coherent(dev))

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
	bool coherent = dev_is_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
			dma_info_to_prot(dir, coherent, attrs));

static void __iommu_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir,
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,

static int __init __iommu_dma_init(void)
	return iommu_dma_init();
arch_initcall(__iommu_dma_init);
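
/*
 * Switch a device over to iommu_dma_ops if its group ended up with a default
 * DMA domain; otherwise warn and leave whatever ops are already installed.
 */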
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *ops)
	struct iommu_domain *domain;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))

		dev->dma_ops = &iommu_dma_ops;

	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",

void arch_teardown_dma_ops(struct device *dev)

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu)

#endif /* CONFIG_IOMMU_DMA */
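
/*
 * Called by the generic DMA configuration code (OF/ACPI) for each device:
 * record coherency, default to swiotlb_dma_ops, and let the IOMMU or Xen
 * paths override those ops where applicable.
 */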
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;