iommu: Remove struct iommu_ops *iommu from arch_setup_dma_ops()
author	Jason Gunthorpe <jgg@nvidia.com>
	Thu, 7 Dec 2023 18:03:08 +0000 (14:03 -0400)
committer	Joerg Roedel <jroedel@suse.de>
	Tue, 12 Dec 2023 09:18:45 +0000 (10:18 +0100)
The iommu parameter is not being used to pass ops; it is just a way to
tell whether an iommu driver was probed. These days that can be detected
directly via device_iommu_mapped(). Call device_iommu_mapped() in the
two places that need the check and remove the iommu parameter everywhere.
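
For reference, device_iommu_mapped() is a one-line inline helper from
include/linux/iommu.h; at this point in the tree it reduces to a check
of the device's IOMMU group (sketch shown for context, not code added
by this patch):

	static inline bool device_iommu_mapped(struct device *dev)
	{
		return dev->iommu_group != NULL;
	}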

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Moritz Fischer <mdf@kernel.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Rob Herring <robh@kernel.org>
Tested-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-16e4def25ebb+820-iommu_fwspec_p1_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
arch/arc/mm/dma.c
arch/arm/mm/dma-mapping-nommu.c
arch/arm/mm/dma-mapping.c
arch/arm64/mm/dma-mapping.c
arch/mips/mm/dma-noncoherent.c
arch/riscv/mm/dma-noncoherent.c
drivers/acpi/scan.c
drivers/hv/hv_common.c
drivers/of/device.c
include/linux/dma-map-ops.h

diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a7fbbb83b7056e976fa446995d5bf33ecbf4764..197707bc7658898843d78bee7c0f02a10f7d26b9 100644
@@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
  * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index cfd9c933d2f09c6a80d86c9b50959728498cb37a..b94850b579952aefacbd1710bc3c317b4c4b77c9 100644
@@ -34,7 +34,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5409225b4abc06b425c1483f53e087318c22a080..6c359a3af8d9c73e35686efe538f1f2754d58a07 100644
@@ -1713,7 +1713,7 @@ void arm_iommu_detach_device(struct device *dev)
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   const struct iommu_ops *iommu, bool coherent)
+                                   bool coherent)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -1748,7 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 #else
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   const struct iommu_ops *iommu, bool coherent)
+                                   bool coherent)
 {
 }
 
@@ -1757,7 +1757,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        /*
         * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1776,8 +1776,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (dev->dma_ops)
                return;
 
-       if (iommu)
-               arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
+       if (device_iommu_mapped(dev))
+               arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
 
        xen_setup_dma_ops(dev);
        dev->archdata.dma_ops_setup = true;
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3cb101e8cb29baca75d3fd25287c9dfe932f7677..61886e43e3a10fe8c84ce6febffc5d65703a2cce 100644
@@ -47,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
 #endif
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        int cls = cache_line_size_of_cpu();
 
@@ -58,7 +58,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                   ARCH_DMA_MINALIGN, cls);
 
        dev->dma_coherent = coherent;
-       if (iommu)
+       if (device_iommu_mapped(dev))
                iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
        xen_setup_dma_ops(dev);
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 3c4fc97b9f394b0c2f2d22c5f59d23c0105cbfb3..0f3cec663a12cd51498157c390f974213cb5a658 100644
@@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent)
+               bool coherent)
 {
        dev->dma_coherent = coherent;
 }
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 4e4e469b8dd66cfdf3e24346a514db2d3dd55773..843107f834b231a032c6853b5d58382ed6165a37 100644
@@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
                   TAINT_CPU_OUT_OF_SPEC,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 02bb2cce423f47d6bc1a1c4e6563a6de6369514d..444a0b3c72f2d8ec64f6e1a5e14b1e1d981171b9 100644
@@ -1641,8 +1641,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
        if (PTR_ERR(iommu) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
-       arch_setup_dma_ops(dev, 0, U64_MAX,
-                               iommu, attr == DEV_DMA_COHERENT);
+       arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
 
        return 0;
 }
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 4372f5d146ab22edaf948622648df24e741ec2f6..0285a74363b3d11e35b2e29aa86e1861e1900f00 100644
@@ -488,7 +488,3 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
-       /*
-        * Hyper-V does not offer a vIOMMU in the guest
-        * VM, so pass 0/NULL for the IOMMU settings
-        */
-       arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+       arch_setup_dma_ops(dev, 0, 0, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 1ca42ad9dd159d4409b7edb73dd232e9b3dcc7fa..65c71be71a8d45bcbac215d501af22c5e4a338d6 100644
@@ -193,7 +193,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
        dev_dbg(dev, "device is%sbehind an iommu\n",
                iommu ? " " : " not ");
 
-       arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+       arch_setup_dma_ops(dev, dma_start, size, coherent);
 
        if (!iommu)
                of_dma_set_restricted_buffer(dev, np);
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index a52e508d1869f660e254cf86e710e33ee990f449..e9cc317e9d7de605f95928035e7f8d17851bf927 100644
@@ -427,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent);
+               bool coherent);
 #else
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-               u64 size, const struct iommu_ops *iommu, bool coherent)
+               u64 size, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */