// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"
struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL	    0
#define COHERENT    1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
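/*
 * Hypothetical usage sketch (not part of the kernel's API surface): how a
 * driver hands a buffer back and forth under the ownership model described
 * above.  The function and variable names are illustrative only.
 */
static void __maybe_unused example_streaming_dma(struct device *example_dev,
						 struct page *example_page)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned before the transfer starts */
	handle = dma_map_page(example_dev, example_page, 0, PAGE_SIZE,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(example_dev, handle))
		return;

	/* ... start the DMA transfer and wait for completion ... */

	/* device -> CPU: invalidation is delayed until the transfer is done */
	dma_unmap_page(example_dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
}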
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
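/*
 * Hypothetical usage sketch: a driver that keeps a streaming mapping alive
 * across several transfers bounces ownership with the dma_sync_* helpers,
 * which land in the two functions above.  'handle' and 'len' are
 * illustrative only.
 */
static void __maybe_unused example_sync_partial(struct device *dev,
						dma_addr_t handle, size_t len)
{
	/* give the CPU ownership of the first 'len' bytes ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the received data here ... */

	/* ... then hand ownership back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}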
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}
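/*
 * Hypothetical sketch: a device that can only drive the low 24 address bits
 * would negotiate its mask as below (the probe function name is illustrative);
 * arm_dma_supported() is what ultimately accepts or rejects the mask.
 */
static int __maybe_unused example_probe_24bit(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24));
}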
const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs);
const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
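/*
 * Example (boot command line): "coherent_pool=1M" sizes the atomic pool to
 * 1 MiB; memparse() accepts the usual K/M/G suffixes.
 */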
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}
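/*
 * Hypothetical sketch: a driver asking for a write-combined mapping (e.g.
 * for a framebuffer) passes DMA_ATTR_WRITE_COMBINE to dma_alloc_attrs(),
 * which is how the attribute reaches __get_dma_pgprot() above.
 */
static void *__maybe_unused example_alloc_wc(struct device *dev, size_t size,
					     dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}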
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}
static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
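/*
 * Hypothetical usage sketch: a driver-side dma_alloc_coherent() call for a
 * descriptor ring ends up in arm_dma_alloc() above.  The names are
 * illustrative only.
 */
static void *__maybe_unused example_alloc_ring(struct device *dev,
					       dma_addr_t *ring_dma)
{
	void *ring = dma_alloc_coherent(dev, SZ_4K, ring_dma, GFP_KERNEL);

	/* program *ring_dma into the device; the CPU accesses 'ring' */
	return ring;
}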
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
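/*
 * Hypothetical sketch: an exporter (e.g. a dma-buf provider) turning a
 * coherent allocation into an sg_table via the generic dma_get_sgtable()
 * wrapper, which dispatches to arm_dma_get_sgtable() above.
 */
static int __maybe_unused example_export_sgt(struct device *dev,
					     struct sg_table *sgt,
					     void *cpu_addr, dma_addr_t handle,
					     size_t size)
{
	return dma_get_sgtable(dev, sgt, cpu_addr, handle, size);
}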
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
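/*
 * Hypothetical usage sketch: how a driver typically consumes the mapped
 * scatterlist.  dma_map_sg() returns the number of mapped entries (0 on
 * failure); unmapping always uses the original nents.  The device-feeding
 * step is left as a comment.
 */
static void __maybe_unused example_map_sg(struct device *dev,
					  struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return;

	for_each_sg(sgl, s, count, i) {
		/* feed sg_dma_address(s) / sg_dma_len(s) to the device */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
}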
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	/*
	 * When CONFIG_ARM_LPAE is set, physical address can extend above
	 * 32-bits, which then can't be addressed by devices that only
	 * support 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return NULL;
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}
/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment)
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
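/*
 * With 4 KiB pages, PAGE_SIZE << order for the entries above works out to
 * 2 MiB (order 9), 1 MiB (order 8), 64 KiB (order 4) and 4 KiB (order 0).
 */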
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (j--)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}
/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
		    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static void arm_iommu_free_attrs(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t handle,
				 unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}
/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_coherent_iommu_unmap_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}
/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}
/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}
static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.dma_supported		= arm_dma_supported,
};
static const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.dma_supported	= arm_dma_supported,
};
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
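/*
 * Hypothetical usage sketch for a bus-master driver that manages its own
 * IO address space: create a 128 MiB window starting at 0x80000000 and
 * attach it.  The base and size values are illustrative only.
 */
static int __maybe_unused example_setup_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	mapping = arm_iommu_create_mapping(dev->bus, 0x80000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);
	if (err)
		arm_iommu_release_mapping(mapping);
	return err;
}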
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
#ifdef CONFIG_SWIOTLB
	dev->dma_coherent = coherent;
#endif

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set, remove this
	 * check when all other callers of set_dma_ops will have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}
#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
#endif /* CONFIG_SWIOTLB */