swiotlb: remove the overflow buffer
author	Christoph Hellwig <hch@lst.de>
Thu, 16 Aug 2018 12:30:39 +0000 (15:30 +0300)
committer	Christoph Hellwig <hch@lst.de>
Fri, 19 Oct 2018 06:43:46 +0000 (08:43 +0200)
Like all other dma mapping drivers, just return an error code instead
of an actual memory buffer.  The overflow buffer existed because, at
the time swiotlb was invented, there was no way to check for dma
mapping errors, but that limitation has long since been fixed.
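
With the overflow buffer gone, a failed bounce-buffer allocation is
reported through the normal mapping-error machinery rather than by
handing back a dummy address.  As a minimal sketch (not part of this
patch; the device, buffer and length names are illustrative), a driver
is expected to check every mapping like this, where dma_mapping_error()
ends up calling the .mapping_error op, now dma_direct_mapping_error():

	#include <linux/dma-mapping.h>

	static int example_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* no silent fallback buffer anymore */

		/* ... program addr into the device, do the DMA ... */

		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}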

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
arch/arm64/mm/dma-mapping.c
arch/powerpc/kernel/dma-swiotlb.c
include/linux/dma-direct.h
include/linux/swiotlb.h
kernel/dma/direct.c
kernel/dma/swiotlb.c

index 072c51fb07d73031578267eea0d5f877685f2676..8d91b927e09efeed7da1d5b83f4a0dc56e4a3397 100644 (file)
@@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 {
        if (swiotlb)
-               return swiotlb_dma_mapping_error(hwdev, addr);
+               return dma_direct_mapping_error(hwdev, addr);
        return 0;
 }
 
index 88f3963ca30f685aead454338a2467a223dbc6ae..5fc335f4d9cd0f561dfa3a947e2502e7de88e658 100644 (file)
@@ -11,7 +11,7 @@
  *
  */
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/of_platform.h>
@@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
-       .mapping_error = swiotlb_dma_mapping_error,
+       .mapping_error = dma_direct_mapping_error,
        .get_required_mask = swiotlb_powerpc_get_required,
 };
 
index fbca184ff5a0ac2f5f4dbdb5ed9d4cb6359a8fb0..bd73e7a9141076389ad638cc8fe4f48515331711 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
+#define DIRECT_MAPPING_ERROR           0
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
index 7ef541ce8f344f547c41478cfea9f25a1d7d5ba4..f847c1b265c4911c87a9bd1e6f728c81340bd323 100644 (file)
@@ -106,9 +106,6 @@ extern void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir);
 
-extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
index 87a6bc2a96c0c46ac422fbf77f6ca63803752c74..f14c376937e5708d9b0d980130866631012dde8e 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
 
-#define DIRECT_MAPPING_ERROR           0
-
 /*
  * Most architectures use ZONE_DMA for the first 16 Megabytes, but
  * some use it for entirely different regions:
index 69bf305ee5f874a68dd03524fca8c9c6d24afb9a..11dbcd80b4a6af8a31cab768e28ddd2a6f5cfdf4 100644 (file)
@@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
  */
 static unsigned long io_tlb_nslabs;
 
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
 /*
  * This is a free list describing the number of free entries available from
  * each index
@@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
        return 0;
 }
 early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
 {
@@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
        bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
        set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
        memset(vaddr, 0, bytes);
-
-       vaddr = phys_to_virt(io_tlb_overflow_buffer);
-       bytes = PAGE_ALIGN(io_tlb_overflow);
-       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-       memset(vaddr, 0, bytes);
 }
 
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-       void *v_overflow_buffer;
        unsigned long i, bytes;
 
        bytes = nslabs << IO_TLB_SHIFT;
@@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
        io_tlb_start = __pa(tlb);
        io_tlb_end = io_tlb_start + bytes;
 
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-                                               PAGE_ALIGN(io_tlb_overflow),
-                                               PAGE_SIZE);
-       if (!v_overflow_buffer)
-               return -ENOMEM;
-
-       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -330,7 +305,6 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
        unsigned long i, bytes;
-       unsigned char *v_overflow_buffer;
 
        bytes = nslabs << IO_TLB_SHIFT;
 
@@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
        memset(tlb, 0, bytes);
 
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                                    get_order(io_tlb_overflow));
-       if (!v_overflow_buffer)
-               goto cleanup2;
-
-       set_memory_decrypted((unsigned long)v_overflow_buffer,
-                       io_tlb_overflow >> PAGE_SHIFT);
-       memset(v_overflow_buffer, 0, io_tlb_overflow);
-       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -390,10 +351,6 @@ cleanup4:
                                                         sizeof(int)));
        io_tlb_list = NULL;
 cleanup3:
-       free_pages((unsigned long)v_overflow_buffer,
-                  get_order(io_tlb_overflow));
-       io_tlb_overflow_buffer = 0;
-cleanup2:
        io_tlb_end = 0;
        io_tlb_start = 0;
        io_tlb_nslabs = 0;
@@ -407,8 +364,6 @@ void __init swiotlb_exit(void)
                return;
 
        if (late_alloc) {
-               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-                          get_order(io_tlb_overflow));
                free_pages((unsigned long)io_tlb_orig_addr,
                           get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -416,8 +371,6 @@ void __init swiotlb_exit(void)
                free_pages((unsigned long)phys_to_virt(io_tlb_start),
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
        } else {
-               memblock_free_late(io_tlb_overflow_buffer,
-                                  PAGE_ALIGN(io_tlb_overflow));
                memblock_free_late(__pa(io_tlb_orig_addr),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                memblock_free_late(__pa(io_tlb_list),
@@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        /* Oh well, have to allocate and map a bounce buffer. */
        map = map_single(dev, phys, size, dir, attrs);
        if (map == SWIOTLB_MAP_ERROR)
-               return __phys_to_dma(dev, io_tlb_overflow_buffer);
+               return DIRECT_MAPPING_ERROR;
 
        dev_addr = __phys_to_dma(dev, map);
 
@@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-       return __phys_to_dma(dev, io_tlb_overflow_buffer);
+       return DIRECT_MAPPING_ERROR;
 }
 
 /*
@@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
 
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-       return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
@@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
 }
 
 const struct dma_map_ops swiotlb_dma_ops = {
-       .mapping_error          = swiotlb_dma_mapping_error,
+       .mapping_error          = dma_direct_mapping_error,
        .alloc                  = swiotlb_alloc,
        .free                   = swiotlb_free,
        .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,