media: ipu6: move the l2_unmap() up before l2_map()
authorBingbu Cao <bingbu.cao@intel.com>
Tue, 5 Nov 2024 02:45:05 +0000 (10:45 +0800)
committerHans Verkuil <hverkuil@xs4all.nl>
Thu, 7 Nov 2024 08:05:58 +0000 (09:05 +0100)
l2_map() and l2_unmap() are better grouped together.
l2_unmap() will soon be called from l2_map() as a mapping
optimization.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Jianhui Dai <jianhui.j.dai@intel.com>
[Sakari Ailus: Rebase on debug print fixes on 32-bit.]
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
drivers/media/pci/intel/ipu6/ipu6-mmu.c

index 11d69a64ad6764db2238326c46c9cf78e368e529..a87f53be5d572d0ba56a692832778969dfb73d88 100644 (file)
@@ -254,6 +254,49 @@ static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
        return pt;
 }
 
+static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
+                      phys_addr_t dummy, size_t size)
+{
+       u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+       u32 iova_start = iova;
+       unsigned int l2_idx;
+       size_t unmapped = 0;
+       unsigned long flags;
+       u32 *l2_pt;
+
+       dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+               l1_idx, iova);
+
+       spin_lock_irqsave(&mmu_info->lock, flags);
+       if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+               spin_unlock_irqrestore(&mmu_info->lock, flags);
+               dev_err(mmu_info->dev,
+                       "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+                       iova, l1_idx);
+               return 0;
+       }
+
+       for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+            (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+                    < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+               phys_addr_t pteval;
+
+               l2_pt = mmu_info->l2_pts[l1_idx];
+               pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
+               dev_dbg(mmu_info->dev,
+                       "unmap l2 index %u with pteval 0x%p\n",
+                       l2_idx, &pteval);
+               l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+               clflush_cache_range((void *)&l2_pt[l2_idx],
+                                   sizeof(l2_pt[l2_idx]));
+               unmapped++;
+       }
+       spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+       return unmapped << ISP_PAGE_SHIFT;
+}
+
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
                  phys_addr_t paddr, size_t size)
 {
@@ -338,49 +381,6 @@ static int __ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
        return l2_map(mmu_info, iova_start, paddr, size);
 }
 
-static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
-                      phys_addr_t dummy, size_t size)
-{
-       u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-       u32 iova_start = iova;
-       unsigned int l2_idx;
-       size_t unmapped = 0;
-       unsigned long flags;
-       u32 *l2_pt;
-
-       dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-               l1_idx, iova);
-
-       spin_lock_irqsave(&mmu_info->lock, flags);
-       if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
-               spin_unlock_irqrestore(&mmu_info->lock, flags);
-               dev_err(mmu_info->dev,
-                       "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
-                       iova, l1_idx);
-               return 0;
-       }
-
-       for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
-            (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
-                    < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-               phys_addr_t pteval;
-
-               l2_pt = mmu_info->l2_pts[l1_idx];
-               pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
-               dev_dbg(mmu_info->dev,
-                       "unmap l2 index %u with pteval 0x%p\n",
-                       l2_idx, &pteval);
-               l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
-
-               clflush_cache_range((void *)&l2_pt[l2_idx],
-                                   sizeof(l2_pt[l2_idx]));
-               unmapped++;
-       }
-       spin_unlock_irqrestore(&mmu_info->lock, flags);
-
-       return unmapped << ISP_PAGE_SHIFT;
-}
-
 static size_t __ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info,
                               unsigned long iova, size_t size)
 {