vfio iommu type1: Update argument of vaddr_get_pfn()
author     Kirti Wankhede <kwankhede@nvidia.com>
           Wed, 16 Nov 2016 20:46:19 +0000 (02:16 +0530)
committer  Alex Williamson <alex.williamson@redhat.com>
           Thu, 17 Nov 2016 15:25:03 +0000 (08:25 -0700)
Update vaddr_get_pfn() to take struct mm_struct *mm as an input argument.
When the given mm is not current->mm, pin the page with
get_user_pages_remote() under that mm's mmap_sem instead of
get_user_pages_fast().
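
For illustration only, and not part of this patch: a hypothetical caller
(the function name below is made up) could use the updated helper to pin a
page in another task's address space by taking a reference on that task's
mm, roughly as sketched here:

    /*
     * Hypothetical sketch, assuming the vfio_iommu_type1.c context of this
     * patch: resolve a task's mm and hand it to the updated vaddr_get_pfn().
     */
    static int example_pin_one_page(struct task_struct *task,
                                    unsigned long vaddr, int prot,
                                    unsigned long *pfn)
    {
            struct mm_struct *mm;
            int ret;

            mm = get_task_mm(task);  /* take a reference on the task's mm */
            if (!mm)
                    return -ESRCH;

            ret = vaddr_get_pfn(mm, vaddr, prot, pfn);

            mmput(mm);               /* drop the reference taken above */
            return ret;
    }

With such a caller, vaddr_get_pfn() would take the mm != current->mm branch
and use get_user_pages_remote(), as shown in the diff below.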

Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Neo Jia <cjia@nvidia.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
drivers/vfio/vfio_iommu_type1.c

index 34d17e51dc977814d71c4d43a376e76de4073c00..52af5fc01d91941e8d5abb1b270f1898f2d350b4 100644
@@ -230,20 +230,36 @@ static int put_pfn(unsigned long pfn, int prot)
        return 0;
 }
 
-static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
+static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+                        int prot, unsigned long *pfn)
 {
        struct page *page[1];
        struct vm_area_struct *vma;
-       int ret = -EFAULT;
+       int ret;
+
+       if (mm == current->mm) {
+               ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
+                                         page);
+       } else {
+               unsigned int flags = 0;
+
+               if (prot & IOMMU_WRITE)
+                       flags |= FOLL_WRITE;
+
+               down_read(&mm->mmap_sem);
+               ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
+                                           NULL);
+               up_read(&mm->mmap_sem);
+       }
 
-       if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
+       if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
                return 0;
        }
 
-       down_read(&current->mm->mmap_sem);
+       down_read(&mm->mmap_sem);
 
-       vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
+       vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
        if (vma && vma->vm_flags & VM_PFNMAP) {
                *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -251,7 +267,7 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
                        ret = 0;
        }
 
-       up_read(&current->mm->mmap_sem);
+       up_read(&mm->mmap_sem);
 
        return ret;
 }
@@ -272,7 +288,7 @@ static long vfio_pin_pages_remote(unsigned long vaddr, long npage,
        if (!current->mm)
                return -ENODEV;
 
-       ret = vaddr_get_pfn(vaddr, prot, pfn_base);
+       ret = vaddr_get_pfn(current->mm, vaddr, prot, pfn_base);
        if (ret)
                return ret;
 
@@ -295,7 +311,7 @@ static long vfio_pin_pages_remote(unsigned long vaddr, long npage,
        for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
                unsigned long pfn = 0;
 
-               ret = vaddr_get_pfn(vaddr, prot, &pfn);
+               ret = vaddr_get_pfn(current->mm, vaddr, prot, &pfn);
                if (ret)
                        break;