iommu/amd: Add support for device based TLB invalidation
author     Vasant Hegde <vasant.hegde@amd.com>      Mon, 5 Feb 2024 11:56:05 +0000 (11:56 +0000)
committer  Joerg Roedel <jroedel@suse.de>           Fri, 9 Feb 2024 12:16:24 +0000 (13:16 +0100)
Add support for invalidating the TLB/IOTLB for a given device.

These functions will be used in subsequent patches that introduce
the per-device GCR3 table and SVA support.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240205115615.6053-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
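
For illustration only, a minimal sketch of how a later SVA patch might invoke the
new helpers; sva_range_invalidate() and sva_mm_release() are hypothetical names
and are not part of this patch:

        static void sva_range_invalidate(struct iommu_dev_data *dev_data,
                                         ioasid_t pasid, u64 start, u64 end)
        {
                /* Flush only the affected range for this PASID on this device. */
                amd_iommu_dev_flush_pasid_pages(dev_data, pasid, start, end - start);
        }

        static void sva_mm_release(struct iommu_dev_data *dev_data, ioasid_t pasid)
        {
                /* The address space is going away: drop all cached translations. */
                amd_iommu_dev_flush_pasid_all(dev_data, pasid);
        }

Both helpers already issue the ATS IOTLB flush (when enabled) and wait for command
completion, so such callers would not need a separate iommu_completion_wait().
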
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/iommu.c

index 4e1f9a444a1ab0d76ef1b8877563c44da0aaca6f..1f4bbc6bf1e536d5a773f33caea379049c5cd19b 100644 (file)
@@ -55,6 +55,11 @@ void amd_iommu_domain_update(struct protection_domain *domain);
 void amd_iommu_domain_flush_complete(struct protection_domain *domain);
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
                                  u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+                                    ioasid_t pasid, u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+                                  ioasid_t pasid);
+
 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
                              unsigned long cr3);
index 498323930892c54322b0e6f008dc8f8da7376cf2..dc8afcefd3ccb4f824e05a7d39ed5298102f4406 100644 (file)
@@ -1538,6 +1538,29 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
                                     CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }
 
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+                                    ioasid_t pasid, u64 address, size_t size)
+{
+       struct iommu_cmd cmd;
+       struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+       build_inv_iommu_pages(&cmd, address, size,
+                             dev_data->domain->id, pasid, true);
+       iommu_queue_command(iommu, &cmd);
+
+       if (dev_data->ats_enabled)
+               device_flush_iotlb(dev_data, address, size, pasid, true);
+
+       iommu_completion_wait(iommu);
+}
+
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+                                  ioasid_t pasid)
+{
+       amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
+                                       CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
+}
+
 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
 {
        int i;