vfio iommu: Added pin and unpin callback functions to vfio_iommu_driver_ops
authorKirti Wankhede <kwankhede@nvidia.com>
Wed, 16 Nov 2016 20:46:17 +0000 (02:16 +0530)
committerAlex Williamson <alex.williamson@redhat.com>
Thu, 17 Nov 2016 15:24:58 +0000 (08:24 -0700)
Added APIs for pinning and unpinning a set of pages. These call back into
the backend IOMMU module to actually pin and unpin pages; a minimal usage
sketch follows below.

Added two new callback functions to struct vfio_iommu_driver_ops. A backend
IOMMU module that supports pinning and unpinning pages for mdev devices
should provide these functions (an illustrative registration sketch follows
the diff below).

Renamed static functions in vfio_iommu_type1.c to avoid name conflicts with
the new exported vfio_pin_pages()/vfio_unpin_pages().
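
As a rough illustration only (the example_pin_guest_pages() helper, its
gfns/hpfns arrays, and the device pointer are hypothetical and not part of
this patch), a vendor driver could use the new exports roughly like this:

  #include <linux/errno.h>
  #include <linux/iommu.h>
  #include <linux/vfio.h>

  /* Hypothetical helper: pin guest PFNs, program the device, then unpin. */
  static int example_pin_guest_pages(struct device *dev, unsigned long *gfns,
                                     int count, unsigned long *hpfns)
  {
          int pinned;

          if (count > VFIO_PIN_PAGES_MAX_ENTRIES)
                  return -E2BIG;

          /* Returns the number of pages pinned or a negative errno. */
          pinned = vfio_pin_pages(dev, gfns, count,
                                  IOMMU_READ | IOMMU_WRITE, hpfns);
          if (pinned < 0)
                  return pinned;

          /* ... use hpfns[] to program the device ... */

          /* Release the pins once the device is done with the pages. */
          vfio_unpin_pages(dev, gfns, pinned);
          return 0;
  }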

Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Neo Jia <cjia@nvidia.com>
Reviewed-by: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
include/linux/vfio.h

index 2e83bdf007fea3ec6fb500acba339c1572e0f982..bd36c16b0ef241fe851a4fd56246749482287ace 100644 (file)
@@ -1799,6 +1799,108 @@ void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
 }
 EXPORT_SYMBOL_GPL(vfio_info_cap_shift);
 
+
+/*
+ * Pin a set of guest PFNs and return their associated host PFNs for local
+ * domain only.
+ * @dev [in]     : device
+ * @user_pfn [in]: array of user/guest PFNs to be pinned.
+ * @npage [in]   : count of elements in user_pfn array.  This count should not
+ *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * @prot [in]    : protection flags
+ * @phys_pfn[out]: array of host PFNs
+ * Return error or number of pages pinned.
+ */
+int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
+                  int prot, unsigned long *phys_pfn)
+{
+       struct vfio_container *container;
+       struct vfio_group *group;
+       struct vfio_iommu_driver *driver;
+       int ret;
+
+       if (!dev || !user_pfn || !phys_pfn || !npage)
+               return -EINVAL;
+
+       if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+               return -E2BIG;
+
+       group = vfio_group_get_from_dev(dev);
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       ret = vfio_group_add_container_user(group);
+       if (ret)
+               goto err_pin_pages;
+
+       container = group->container;
+       down_read(&container->group_lock);
+
+       driver = container->iommu_driver;
+       if (likely(driver && driver->ops->pin_pages))
+               ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
+                                            npage, prot, phys_pfn);
+       else
+               ret = -ENOTTY;
+
+       up_read(&container->group_lock);
+       vfio_group_try_dissolve_container(group);
+
+err_pin_pages:
+       vfio_group_put(group);
+       return ret;
+}
+EXPORT_SYMBOL(vfio_pin_pages);
+
+/*
+ * Unpin a set of guest PFNs for local domain only.
+ * @dev [in]     : device
+ * @user_pfn [in]: array of user/guest PFNs to be unpinned.  The PFNs should
+ *                have been pinned earlier by a vfio_pin_pages() call.
+ * @npage [in]   : count of elements in user_pfn array.  This count should not
+ *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
+ * Return error or number of pages unpinned.
+ */
+int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
+{
+       struct vfio_container *container;
+       struct vfio_group *group;
+       struct vfio_iommu_driver *driver;
+       int ret;
+
+       if (!dev || !user_pfn || !npage)
+               return -EINVAL;
+
+       if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+               return -E2BIG;
+
+       group = vfio_group_get_from_dev(dev);
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       ret = vfio_group_add_container_user(group);
+       if (ret)
+               goto err_unpin_pages;
+
+       container = group->container;
+       down_read(&container->group_lock);
+
+       driver = container->iommu_driver;
+       if (likely(driver && driver->ops->unpin_pages))
+               ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
+                                              npage);
+       else
+               ret = -ENOTTY;
+
+       up_read(&container->group_lock);
+       vfio_group_try_dissolve_container(group);
+
+err_unpin_pages:
+       vfio_group_put(group);
+       return ret;
+}
+EXPORT_SYMBOL(vfio_unpin_pages);
+
 /**
  * Module/class support
  */
index 2ba19424e4a18f33555f370247ffc28204212089..9f3d58d3dfaf1fce5157adf320eef5a14dfb119d 100644 (file)
@@ -259,8 +259,8 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
  * the iommu can only map chunks of consecutive pfns anyway, so get the
  * first page and all consecutive pages with the same locking.
  */
-static long vfio_pin_pages(unsigned long vaddr, long npage,
-                          int prot, unsigned long *pfn_base)
+static long vfio_pin_pages_remote(unsigned long vaddr, long npage,
+                                 int prot, unsigned long *pfn_base)
 {
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        bool lock_cap = capable(CAP_IPC_LOCK);
@@ -318,8 +318,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        return i;
 }
 
-static long vfio_unpin_pages(unsigned long pfn, long npage,
-                            int prot, bool do_accounting)
+static long vfio_unpin_pages_remote(unsigned long pfn, long npage,
+                                   int prot, bool do_accounting)
 {
        unsigned long unlocked = 0;
        long i;
@@ -382,9 +382,9 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                if (WARN_ON(!unmapped))
                        break;
 
-               unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
-                                            unmapped >> PAGE_SHIFT,
-                                            dma->prot, false);
+               unlocked += vfio_unpin_pages_remote(phys >> PAGE_SHIFT,
+                                                   unmapped >> PAGE_SHIFT,
+                                                   dma->prot, false);
                iova += unmapped;
 
                cond_resched();
@@ -613,8 +613,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 
        while (size) {
                /* Pin a contiguous chunk of memory */
-               npage = vfio_pin_pages(vaddr + dma->size,
-                                      size >> PAGE_SHIFT, prot, &pfn);
+               npage = vfio_pin_pages_remote(vaddr + dma->size,
+                                             size >> PAGE_SHIFT, prot, &pfn);
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
@@ -624,7 +624,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                /* Map it! */
                ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
                if (ret) {
-                       vfio_unpin_pages(pfn, npage, prot, true);
+                       vfio_unpin_pages_remote(pfn, npage, prot, true);
                        break;
                }
 
index 0ecae0b1cd3418345c736b185e7afa42771e201c..3c862a030029bc6b3aa4ee413af2a6b872d67183 100644 (file)
@@ -75,7 +75,11 @@ struct vfio_iommu_driver_ops {
                                        struct iommu_group *group);
        void            (*detach_group)(void *iommu_data,
                                        struct iommu_group *group);
-
+       int             (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+                                    int npage, int prot,
+                                    unsigned long *phys_pfn);
+       int             (*unpin_pages)(void *iommu_data,
+                                      unsigned long *user_pfn, int npage);
 };
 
 extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
@@ -92,6 +96,13 @@ extern int vfio_external_user_iommu_id(struct vfio_group *group);
 extern long vfio_external_check_extension(struct vfio_group *group,
                                          unsigned long arg);
 
+#define VFIO_PIN_PAGES_MAX_ENTRIES     (PAGE_SIZE/sizeof(unsigned long))
+
+extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
+                         int npage, int prot, unsigned long *phys_pfn);
+extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
+                           int npage);
+
 /*
  * Sub-module helpers
  */
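
A minimal sketch of the backend side, assuming hypothetical my_iommu_* names;
only the two new vfio_iommu_driver_ops fields and vfio_register_iommu_driver()
come from this patch:

  static int my_iommu_pin_pages(void *iommu_data, unsigned long *user_pfn,
                                int npage, int prot, unsigned long *phys_pfn)
  {
          /* Translate and pin each user/guest PFN, filling phys_pfn[]. */
          return npage;                   /* or a negative errno on failure */
  }

  static int my_iommu_unpin_pages(void *iommu_data, unsigned long *user_pfn,
                                  int npage)
  {
          /* Drop the pins taken by my_iommu_pin_pages(). */
          return npage;
  }

  static const struct vfio_iommu_driver_ops my_iommu_ops = {
          .name           = "my-iommu",
          .owner          = THIS_MODULE,
          /* .open, .release, .ioctl, .attach_group, .detach_group, ... */
          .pin_pages      = my_iommu_pin_pages,
          .unpin_pages    = my_iommu_unpin_pages,
  };

  static int __init my_iommu_init(void)
  {
          return vfio_register_iommu_driver(&my_iommu_ops);
  }

vfio_pin_pages()/vfio_unpin_pages() reach these callbacks through
container->iommu_driver->ops, as shown in the vfio.c hunk above.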