iommu/vt-d: Use cache_tag_flush_range() in cache_invalidate_user
Author:     Lu Baolu <baolu.lu@linux.intel.com>
AuthorDate: Wed, 24 Apr 2024 07:16:40 +0000 (15:16 +0800)
Commit:     Joerg Roedel <jroedel@suse.de>
CommitDate: Fri, 26 Apr 2024 09:57:46 +0000 (11:57 +0200)
The cache_invalidate_user callback is called to invalidate a range
of caches for the affected user domain. Use cache_tag_flush_range()
in this callback.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240416080656.60968-9-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.h
drivers/iommu/intel/nested.c

index cb83b0995391d7dcc20859734a472d797e60fd8e..1d705a983dd7ba5c879e904b27c2478a70fa2435 100644 (file)
@@ -1049,6 +1049,12 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* Return a size from number of VTD pages. */
+static inline unsigned long nrpages_to_size(unsigned long npages)
+{
+       return npages << VTD_PAGE_SHIFT;
+}
+
 /* Convert value to context PASID directory size field coding. */
 #define context_pdts(pds)      (((pds) & 0x7) << 9)
 
index 13406ee742bfa741c8198096b60aef3110a28384..16a2bcf5cfeb9d2ea745e0aaf09bbc4df5cf6449 100644 (file)
@@ -88,50 +88,6 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
        kfree(dmar_domain);
 }
 
-static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
-                                  unsigned int mask)
-{
-       struct device_domain_info *info;
-       unsigned long flags;
-       u16 sid, qdep;
-
-       spin_lock_irqsave(&domain->lock, flags);
-       list_for_each_entry(info, &domain->devices, link) {
-               if (!info->ats_enabled)
-                       continue;
-               sid = info->bus << 8 | info->devfn;
-               qdep = info->ats_qdep;
-               qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-                                  qdep, addr, mask);
-               quirk_extra_dev_tlb_flush(info, addr, mask,
-                                         IOMMU_NO_PASID, qdep);
-       }
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-
-static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
-                                    u64 npages, bool ih)
-{
-       struct iommu_domain_info *info;
-       unsigned int mask;
-       unsigned long i;
-
-       xa_for_each(&domain->iommu_array, i, info)
-               qi_flush_piotlb(info->iommu,
-                               domain_id_iommu(domain, info->iommu),
-                               IOMMU_NO_PASID, addr, npages, ih);
-
-       if (!domain->has_iotlb_device)
-               return;
-
-       if (npages == U64_MAX)
-               mask = 64 - VTD_PAGE_SHIFT;
-       else
-               mask = ilog2(__roundup_pow_of_two(npages));
-
-       nested_flush_dev_iotlb(domain, addr, mask);
-}
-
 static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
                                              struct iommu_user_data_array *array)
 {
@@ -164,9 +120,9 @@ static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
                        break;
                }
 
-               intel_nested_flush_cache(dmar_domain, inv_entry.addr,
-                                        inv_entry.npages,
-                                        inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+               cache_tag_flush_range(dmar_domain, inv_entry.addr,
+                                     inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
+                                     inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
                processed++;
        }