media: ipu6: optimize the IPU6 MMU unmapping flow
author Bingbu Cao <bingbu.cao@intel.com>
Tue, 5 Nov 2024 02:45:07 +0000 (10:45 +0800)
committer Hans Verkuil <hverkuil@xs4all.nl>
Thu, 7 Nov 2024 08:05:58 +0000 (09:05 +0100)
The MMU mapping flow has been optimized to improve performance; the
unmapping flow can be optimized to follow the same approach.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
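
The idea behind the reworked l2_unmap() can be sketched in user space:
clear all PTEs belonging to one L2 table first, then issue a single
cache flush covering the whole run of modified entries, instead of one
flush per PTE. The sketch below is a minimal illustration with
hypothetical names (unmap(), flush_range(), l2_tables[]); the real
driver walks mmu_info->l2_pts under mmu_info->lock and flushes with
clflush_cache_range().

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define L2_PTES      1024                      /* entries per L2 table */
#define L2_SPAN      (L2_PTES * PAGE_SIZE)     /* range covered by one L1 entry */

static uint32_t l2_tables[4][L2_PTES];         /* toy two-level page table */

/* Stand-in for clflush_cache_range(): just count flush calls. */
static unsigned int flushes;
static void flush_range(void *addr, size_t len)
{
	(void)addr; (void)len;
	flushes++;
}

/*
 * Unmap [iova, iova + size): clear the PTEs of each L2 table, then
 * flush the modified entries of that table in one call.
 */
static size_t unmap(unsigned long iova, size_t size)
{
	size_t unmapped = 0;

	for (unsigned int l1 = iova / L2_SPAN; size > 0 && l1 < 4; l1++) {
		unsigned int l2 = (iova % L2_SPAN) / PAGE_SIZE;
		unsigned int entries = 0;

		for (; size > 0 && l2 < L2_PTES; l2++) {
			l2_tables[l1][l2] = 0;   /* dummy PTE value */
			iova += PAGE_SIZE;
			size -= PAGE_SIZE;
			entries++;
			unmapped += PAGE_SIZE;
		}
		/* one flush for the whole run of modified entries */
		flush_range(&l2_tables[l1][l2 - entries],
			    sizeof(uint32_t) * entries);
	}
	return unmapped;
}

int main(void)
{
	/* 8 MiB starting mid-table: spans three L2 tables, three flushes */
	size_t n = unmap(512 * PAGE_SIZE, 2048 * PAGE_SIZE);

	printf("unmapped %zu bytes with %u flushes\n", n, flushes);
	return 0;
}

The per-PTE flushing of the old loop would have issued 2048 flushes for
the same range; batching per L2 table reduces that to three here.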
drivers/media/pci/intel/ipu6/ipu6-mmu.c

index 70e68c1414ca01a3fa09ba61fa655dc72691d1a9..9ad04a92ce84ba00d519bfc077e06ed5671b6387 100644
@@ -257,44 +257,51 @@ static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
 static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
                       phys_addr_t dummy, size_t size)
 {
-       u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-       u32 iova_start = iova;
+       unsigned int l2_entries;
        unsigned int l2_idx;
-       size_t unmapped = 0;
        unsigned long flags;
+       u32 l1_idx;
        u32 *l2_pt;
 
-       dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-               l1_idx, iova);
-
        spin_lock_irqsave(&mmu_info->lock, flags);
-       if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
-               spin_unlock_irqrestore(&mmu_info->lock, flags);
-               dev_err(mmu_info->dev,
-                       "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
-                       iova, l1_idx);
-               return 0;
-       }
-
-       for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
-            (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
-                    < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-               phys_addr_t pteval;
+       for (l1_idx = iova >> ISP_L1PT_SHIFT;
+            size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
+               dev_dbg(mmu_info->dev,
+                       "unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
+                       l1_idx, iova);
 
+               if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+                       dev_err(mmu_info->dev,
+                               "unmap not mapped iova 0x%8.8lx l1 index %u\n",
+                               iova, l1_idx);
+                       continue;
+               }
                l2_pt = mmu_info->l2_pts[l1_idx];
-               pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
-               dev_dbg(mmu_info->dev,
-                       "unmap l2 index %u with pteval 0x%p\n",
-                       l2_idx, &pteval);
-               l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
 
-               clflush_cache_range((void *)&l2_pt[l2_idx],
-                                   sizeof(l2_pt[l2_idx]));
-               unmapped++;
+               l2_entries = 0;
+               for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+                    size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+                       phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
+
+                       dev_dbg(mmu_info->dev,
+                               "unmap l2 index %u with pteval 0x%p\n",
+                               l2_idx, &pteval);
+                       l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+                       iova += ISP_PAGE_SIZE;
+                       size -= ISP_PAGE_SIZE;
+
+                       l2_entries++;
+               }
+
+               WARN_ON_ONCE(!l2_entries);
+               clflush_cache_range(&l2_pt[l2_idx - l2_entries],
+                                   sizeof(l2_pt[0]) * l2_entries);
        }
-       spin_unlock_irqrestore(&mmu_info->lock, flags);
 
-       return unmapped << ISP_PAGE_SHIFT;
+       WARN_ON_ONCE(size);
+       spin_unlock_irqrestore(&mmu_info->lock, flags);
+       return l2_entries << ISP_PAGE_SHIFT;
 }
 
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
@@ -643,40 +650,13 @@ phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
        return phy_addr;
 }
 
-static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
-                             unsigned long addr_merge, size_t size)
-{
-       unsigned int pgsize_idx;
-       size_t pgsize;
-
-       /* Max page size that still fits into 'size' */
-       pgsize_idx = __fls(size);
-
-       if (likely(addr_merge)) {
-               /* Max page size allowed by address */
-               unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-               pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-       }
-
-       pgsize = (1UL << (pgsize_idx + 1)) - 1;
-       pgsize &= pgsize_bitmap;
-
-       WARN_ON(!pgsize);
-
-       /* pick the biggest page */
-       pgsize_idx = __fls(pgsize);
-       pgsize = 1UL << pgsize_idx;
-
-       return pgsize;
-}
-
 size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
                      size_t size)
 {
-       size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;
 
+       dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);
+
        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
 
@@ -688,29 +668,10 @@ size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                        iova, size, min_pagesz);
-               return -EINVAL;
-       }
-
-       /*
-        * Keep iterating until we either unmap 'size' bytes (or more)
-        * or we hit an area that isn't mapped.
-        */
-       while (unmapped < size) {
-               size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
-                                               iova, size - unmapped);
-
-               unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
-               if (!unmapped_page)
-                       break;
-
-               dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
-                       iova, unmapped_page);
-
-               iova += unmapped_page;
-               unmapped += unmapped_page;
+               return 0;
        }
 
-       return unmapped;
+       return __ipu6_mmu_unmap(mmu_info, iova, size);
 }
 
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
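
For reference, the dropped ipu6_mmu_pgsize() helper only mattered when
several page sizes were supported. Assuming, as in this driver, that
pgsize_bitmap contains just the 4 KiB page size, the helper always
returned SZ_4K, so the old while loop unmapped one page per iteration.
A user-space sketch of the removed computation (GCC builtins standing
in for the kernel's __fls()/__ffs(); assumes 64-bit long) makes this
concrete:

#include <stdio.h>

#define SZ_4K 0x1000UL

/* The removed helper's computation, reproduced for illustration. */
static unsigned long pgsize(unsigned long bitmap, unsigned long addr_merge,
			    unsigned long size)
{
	/* max page size that still fits into 'size' */
	unsigned int idx = 63 - __builtin_clzl(size);      /* __fls(size) */

	if (addr_merge) {
		/* max page size allowed by the address alignment */
		unsigned int align = __builtin_ctzl(addr_merge); /* __ffs() */

		if (align < idx)
			idx = align;
	}
	/* keep only supported sizes, then pick the biggest */
	return 1UL << (63 - __builtin_clzl(((1UL << (idx + 1)) - 1) & bitmap));
}

int main(void)
{
	/* with a 4 KiB-only bitmap, every aligned request yields 4 KiB */
	printf("0x%lx\n", pgsize(SZ_4K, 0x5000, 0x20000));
	printf("0x%lx\n", pgsize(SZ_4K, 0x1000, 0x1000));
	return 0;
}

Both calls print 0x1000: with only 4 KiB set in the bitmap, the masking
step can never select anything larger, which is why handing the full
range to a single __ipu6_mmu_unmap() call is equivalent.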