vfio/spapr_tce: Invalidate multiple TCEs at once
author Alexey Kardashevskiy <aik@ozlabs.ru>
Thu, 29 Aug 2019 08:52:50 +0000 (18:52 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 29 Aug 2019 23:40:14 +0000 (09:40 +1000)
Invalidating a TCE cache entry for each updated TCE is quite expensive.
Use the new iommu_table_ops::xchg_no_kill()/tce_kill() callbacks to
bring down the time spent mapping a huge guest DMA window.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190829085252.72370-4-aik@ozlabs.ru
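
The change below updates each TCE with the no-kill exchange helper and issues
a single iommu_tce_kill() over the whole range afterwards, instead of flushing
the TCE cache once per entry as iommu_tce_xchg() did. Note that the build paths
only kill on success; on failure they fall back to tce_iommu_clear(), which now
performs its own range invalidation. What follows is a minimal sketch of that
pattern only, assuming the helper signatures visible in the hunks below;
map_range_sketch() is a hypothetical helper and omits the page pinning, use
accounting and v2 (pre-registered memory) handling of the real
tce_iommu_build()/tce_iommu_build_v2():

    /*
     * Hypothetical sketch of the batched-invalidation pattern; not part of
     * the patch. Error handling and page accounting are omitted.
     */
    static long map_range_sketch(struct mm_struct *mm, struct iommu_table *tbl,
                    unsigned long entry, unsigned long pages,
                    unsigned long hpa, enum dma_data_direction direction)
    {
            unsigned long i;
            long ret = 0;

            for (i = 0; i < pages; ++i) {
                    unsigned long cur_hpa = hpa + (i << tbl->it_page_shift);
                    enum dma_data_direction dirtmp = direction;

                    /* Update the TCE but do not flush the TCE cache yet */
                    ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i, &cur_hpa,
                                    &dirtmp);
                    if (ret)
                            break;
            }

            /* One invalidation for the whole range instead of one per TCE */
            if (!ret)
                    iommu_tce_kill(tbl, entry, pages);

            return ret;
    }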
drivers/vfio/vfio_iommu_spapr_tce.c

index 8ce9ad21129f08d2e49820d76627684bd54e743e..9809369e0ed36138b9631a26b49541b31e172bc5 100644
@@ -435,7 +435,7 @@ static int tce_iommu_clear(struct tce_container *container,
        unsigned long oldhpa;
        long ret;
        enum dma_data_direction direction;
-       unsigned long lastentry = entry + pages;
+       unsigned long lastentry = entry + pages, firstentry = entry;
 
        for ( ; entry < lastentry; ++entry) {
                if (tbl->it_indirect_levels && tbl->it_userspace) {
@@ -460,7 +460,7 @@ static int tce_iommu_clear(struct tce_container *container,
 
                direction = DMA_NONE;
                oldhpa = 0;
-               ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+               ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
                                &direction);
                if (ret)
                        continue;
@@ -476,6 +476,8 @@ static int tce_iommu_clear(struct tce_container *container,
                tce_iommu_unuse_page(container, oldhpa);
        }
 
+       iommu_tce_kill(tbl, firstentry, pages);
+
        return 0;
 }
 
@@ -518,8 +520,8 @@ static long tce_iommu_build(struct tce_container *container,
 
                hpa |= offset;
                dirtmp = direction;
-               ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-                               &dirtmp);
+               ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+                               &hpa, &dirtmp);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -536,6 +538,8 @@ static long tce_iommu_build(struct tce_container *container,
 
        if (ret)
                tce_iommu_clear(container, tbl, entry, i);
+       else
+               iommu_tce_kill(tbl, entry, pages);
 
        return ret;
 }
@@ -572,8 +576,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
                if (mm_iommu_mapped_inc(mem))
                        break;
 
-               ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-                               &dirtmp);
+               ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+                               &hpa, &dirtmp);
                if (ret) {
                        /* dirtmp cannot be DMA_NONE here */
                        tce_iommu_unuse_page_v2(container, tbl, entry + i);
@@ -593,6 +597,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 
        if (ret)
                tce_iommu_clear(container, tbl, entry, i);
+       else
+               iommu_tce_kill(tbl, entry, pages);
 
        return ret;
 }