iommu/vt-d: Factor out helpers from domain_context_mapping_one()
authorLu Baolu <baolu.lu@linux.intel.com>
Mon, 2 Sep 2024 02:27:15 +0000 (10:27 +0800)
committerJoerg Roedel <jroedel@suse.de>
Mon, 2 Sep 2024 16:14:57 +0000 (18:14 +0200)
Extract common code from domain_context_mapping_one() into new helpers,
making it reusable by other functions such as the upcoming identity domain
implementation. No intentional functional changes.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20240809055431.36513-6-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.c

index aa8e10a2ad51b83e7950b9ca79a05c4aab5ad60b..7950152bb4e6c430f39bf33f388bf0677524f644 100644 (file)
@@ -1597,6 +1597,61 @@ static void domain_exit(struct dmar_domain *domain)
        kfree(domain);
 }
 
+/*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry about it
+ * hereafter.
+ */
+static void copied_context_tear_down(struct intel_iommu *iommu,
+                                    struct context_entry *context,
+                                    u8 bus, u8 devfn)
+{
+       u16 did_old;
+
+       if (!context_copied(iommu, bus, devfn))
+               return;
+
+       assert_spin_locked(&iommu->lock);
+
+       did_old = context_domain_id(context);
+       context_clear_entry(context);
+
+       if (did_old < cap_ndoms(iommu->cap)) {
+               iommu->flush.flush_context(iommu, did_old,
+                                          (((u16)bus) << 8) | devfn,
+                                          DMA_CCMD_MASK_NOBIT,
+                                          DMA_CCMD_DEVICE_INVL);
+               iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+                                        DMA_TLB_DSI_FLUSH);
+       }
+
+       clear_context_copied(iommu, bus, devfn);
+}
+
+/*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
+                                       u8 bus, u8 devfn)
+{
+       if (cap_caching_mode(iommu->cap)) {
+               iommu->flush.flush_context(iommu, 0,
+                                          (((u16)bus) << 8) | devfn,
+                                          DMA_CCMD_MASK_NOBIT,
+                                          DMA_CCMD_DEVICE_INVL);
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+       } else {
+               iommu_flush_write_buffer(iommu);
+       }
+}
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn)
@@ -1625,31 +1680,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        if (context_present(context) && !context_copied(iommu, bus, devfn))
                goto out_unlock;
 
-       /*
-        * For kdump cases, old valid entries may be cached due to the
-        * in-flight DMA and copied pgtable, but there is no unmapping
-        * behaviour for them, thus we need an explicit cache flush for
-        * the newly-mapped device. For kdump, at this point, the device
-        * is supposed to finish reset at its driver probe stage, so no
-        * in-flight DMA will exist, and we don't need to worry anymore
-        * hereafter.
-        */
-       if (context_copied(iommu, bus, devfn)) {
-               u16 did_old = context_domain_id(context);
-
-               if (did_old < cap_ndoms(iommu->cap)) {
-                       iommu->flush.flush_context(iommu, did_old,
-                                                  (((u16)bus) << 8) | devfn,
-                                                  DMA_CCMD_MASK_NOBIT,
-                                                  DMA_CCMD_DEVICE_INVL);
-                       iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
-                                                DMA_TLB_DSI_FLUSH);
-               }
-
-               clear_context_copied(iommu, bus, devfn);
-       }
-
+       copied_context_tear_down(iommu, context, bus, devfn);
        context_clear_entry(context);
+
        context_set_domain_id(context, did);
 
        if (translation != CONTEXT_TT_PASS_THROUGH) {
@@ -1685,23 +1718,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        context_set_present(context);
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(context, sizeof(*context));
-
-       /*
-        * It's a non-present to present mapping. If hardware doesn't cache
-        * non-present entry we only need to flush the write-buffer. If the
-        * _does_ cache non-present entries, then it does so in the special
-        * domain #0, which we have to flush:
-        */
-       if (cap_caching_mode(iommu->cap)) {
-               iommu->flush.flush_context(iommu, 0,
-                                          (((u16)bus) << 8) | devfn,
-                                          DMA_CCMD_MASK_NOBIT,
-                                          DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-       } else {
-               iommu_flush_write_buffer(iommu);
-       }
-
+       context_present_cache_flush(iommu, did, bus, devfn);
        ret = 0;
 
 out_unlock:
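
For context, the sketch below shows how the upcoming identity domain implementation mentioned in the commit message might reuse the two new helpers when installing a pass-through context entry. It is illustrative only and not part of this patch; the function name context_setup_pass_through() and the use of FLPT_DEFAULT_DID as the domain id are assumptions for the example.

/*
 * Illustrative sketch (not part of this patch): a pass-through context
 * setup path that reuses copied_context_tear_down() and
 * context_present_cache_flush(). Name and details are assumptions.
 */
static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context) {
		spin_unlock(&iommu->lock);
		return -ENOMEM;
	}

	if (context_present(context) && !context_copied(iommu, bus, devfn)) {
		spin_unlock(&iommu->lock);
		return 0;
	}

	copied_context_tear_down(iommu, context, bus, devfn);
	context_clear_entry(context);
	context_set_domain_id(context, FLPT_DEFAULT_DID);

	/*
	 * In pass-through mode the address width must be programmed with
	 * the largest AGAW supported by hardware; the second-level page
	 * table pointer is ignored.
	 */
	context_set_address_width(context, iommu->msagaw);
	context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
	context_set_fault_enable(context);
	context_set_present(context);
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(context, sizeof(*context));

	context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);
	spin_unlock(&iommu->lock);

	return 0;
}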