arm64: use swiotlb_alloc and swiotlb_free
author    Christoph Hellwig <hch@lst.de>
Sun, 24 Dec 2017 12:53:50 +0000 (13:53 +0100)
committer Christoph Hellwig <hch@lst.de>
Mon, 15 Jan 2018 08:36:02 +0000 (09:36 +0100)
The generic swiotlb_alloc and swiotlb_free routines already take care
of CMA allocations and adding GFP_DMA32 where needed, so use them
instead of the arm64-specific helpers.
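
For reference, the policy the generic routines are expected to provide is
essentially the one open-coded in the arm64 helper removed below. A
simplified sketch of that allocation policy (sketch_generic_dma_alloc is an
illustrative name, not the literal lib/swiotlb.c implementation):

	/*
	 * Widen the GFP mask for devices with a 32-bit coherent mask, prefer
	 * CMA when the caller may block, and otherwise fall back to the
	 * swiotlb coherent pool.
	 */
	static void *sketch_generic_dma_alloc(struct device *dev, size_t size,
					      dma_addr_t *dma_handle, gfp_t flags,
					      unsigned long attrs)
	{
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
			flags |= GFP_DMA32;

		if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
			struct page *page;

			page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							 get_order(size), flags);
			if (!page)
				return NULL;

			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			memset(page_address(page), 0, size);
			return page_address(page);
		}

		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}

The point of the change is that this handling lives in common code: any
architecture that selects DMA_DIRECT_OPS gets the same CMA and GFP_DMA32
behaviour instead of duplicating it per arch.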

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
arch/arm64/Kconfig
arch/arm64/mm/dma-mapping.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b6985f15d02a9da0f24f06a341f43e30246e82f..53205c02b18a9c8341cb80b27eacdedc24899a09 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -59,6 +59,7 @@ config ARM64
        select COMMON_CLK
        select CPU_PM if (SUSPEND || CPU_IDLE)
        select DCACHE_WORD_ACCESS
+       select DMA_DIRECT_OPS
        select EDAC_SUPPORT
        select FRAME_POINTER
        select GENERIC_ALLOCATOR
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0d641875b20e9760c203ae56f21ccd6bd6d0bfe2..a96ec0181818b90e830898753ea602d77e34b2a9 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
        return 1;
 }
 
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
-                                 dma_addr_t *dma_handle, gfp_t flags,
-                                 unsigned long attrs)
-{
-       if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-           dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-               flags |= GFP_DMA32;
-       if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
-               struct page *page;
-               void *addr;
-
-               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-                                                get_order(size), flags);
-               if (!page)
-                       return NULL;
-
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
-               addr = page_address(page);
-               memset(addr, 0, size);
-               return addr;
-       } else {
-               return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-       }
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
-                               void *vaddr, dma_addr_t dma_handle,
-                               unsigned long attrs)
-{
-       bool freed;
-       phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-
-       freed = dma_release_from_contiguous(dev,
-                                       phys_to_page(paddr),
-                                       size >> PAGE_SHIFT);
-       if (!freed)
-               swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
                return addr;
        }
 
-       ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+       ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;
 
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
        return coherent_ptr;
 
 no_map:
-       __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+       swiotlb_free(dev, size, ptr, *dma_handle, attrs);
 no_mem:
        return NULL;
 }
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
                        return;
                vunmap(vaddr);
        }
-       __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+       swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,