// SPDX-License-Identifier: GPL-2.0-only
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}
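
/*
 * Cache maintenance hooks for non-coherent streaming DMA: clean the CPU
 * caches before the device reads a buffer, and invalidate/clean as needed
 * when ownership returns to the CPU. __dma_map_area()/__dma_unmap_area()
 * are the low-level arm64 assembly helpers.
 */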
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(paddr), size, dir);
}

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__dma_flush_area(page_address(page), size);
}
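
/*
 * The helpers in this first CONFIG_IOMMU_DMA block deal with buffers that
 * are not vmalloc-mapped: they are reused by the IOMMU DMA ops further down
 * for mmap() and scatter-gather table export.
 */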
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
#endif /* CONFIG_IOMMU_DMA */

static int __init arm64_dma_init(void)
{
	return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
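
/*
 * The pool set up above backs dma_alloc_from_pool()/dma_free_from_pool():
 * non-blocking allocations for non-coherent devices are served from this
 * pre-remapped, non-cacheable (PROT_NORMAL_NC) region rather than remapping
 * pages at allocation time.
 */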

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}
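
/*
 * Allocation takes one of three paths depending on gfp and attrs: a
 * physically contiguous buffer (page allocator or atomic pool) when the
 * caller cannot block, a CMA allocation remapped into vmalloc space for
 * DMA_ATTR_FORCE_CONTIGUOUS, and otherwise an array of pages from
 * iommu_dma_alloc() made IOVA-contiguous by the IOMMU and remapped for
 * the CPU.
 */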
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = dma_alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				dma_free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (*handle == DMA_MAPPING_ERROR) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (addr) {
			if (!coherent)
				__dma_flush_area(page_to_virt(page), iosize);
			memset(addr, 0, size);
		} else {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (dma_in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		dma_free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}
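
/*
 * mmap() and get_sgtable() must cope with the same cpu_addr flavours listed
 * in __iommu_free_attrs() above, hence the parallel is_vmalloc_addr() and
 * DMA_ATTR_FORCE_CONTIGUOUS special cases below.
 */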

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (!is_vmalloc_addr(cpu_addr)) {
		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!is_vmalloc_addr(cpu_addr)) {
		struct page *page = virt_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}
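
/*
 * For single-buffer syncs the device address is an IOVA, so it is translated
 * back to a physical address through the device's DMA domain before the
 * architecture cache maintenance runs.
 */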

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dev_addr != DMA_MAPPING_ERROR)
		__dma_map_area(page_address(page) + offset, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
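
/*
 * Scatterlist syncs walk each element and use sg_phys(), which refers to the
 * original CPU pages, so no IOVA-to-physical lookup is needed here.
 */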

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
};
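
/*
 * For illustration: with iommu_dma_ops installed on a device, a driver
 * mapping such as
 *
 *	dma_addr_t iova = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up in __iommu_map_page() above, which creates the IOVA translation
 * via iommu_dma_map_page() and, for a non-coherent device, cleans the CPU
 * caches over the buffer before the device can read it.
 */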

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */
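
/*
 * Called when a device is configured for DMA (e.g. from the OF/ACPI code):
 * record its coherency, install the IOMMU DMA ops when an IOMMU translates
 * for it, and warn if a non-coherent device's cache writeback granule
 * (CTR_EL0.CWG) exceeds ARCH_DMA_MINALIGN.
 */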

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	int cls = cache_line_size_of_cpu();

	WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
		   TAINT_CPU_OUT_OF_SPEC,
		   "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
		   dev_driver_string(dev), dev_name(dev),
		   ARCH_DMA_MINALIGN, cls);

	dev->dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = xen_dma_ops;
#endif
}