mm: inline vm_insert_pfn_prot() into caller
author	Matthew Wilcox <willy@infradead.org>
Fri, 26 Oct 2018 22:04:33 +0000 (15:04 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2018 23:25:20 +0000 (16:25 -0700)
vm_insert_pfn_prot() is only called from vmf_insert_pfn_prot(), so inline
it and convert some of the errnos into vm_fault codes earlier.

Link: http://lkml.kernel.org/r/20180828145728.11873-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
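
For context (not part of the patch): a minimal sketch of how a hypothetical driver's ->fault handler could consume vmf_insert_pfn_prot() now that callers get a vm_fault_t back directly instead of translating errnos themselves. The my_dev structure and my_dev_phys_pfn() helper below are illustrative assumptions, not anything from this commit.

#include <linux/mm.h>
#include <linux/mm_types.h>

struct my_dev {
	unsigned long base_pfn;	/* first pfn of the device aperture (illustrative) */
};

/* Illustrative helper: translate a faulting page offset to a device pfn. */
static unsigned long my_dev_phys_pfn(struct my_dev *dev, pgoff_t pgoff)
{
	return dev->base_pfn + pgoff;
}

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct my_dev *dev = vma->vm_private_data;
	unsigned long pfn = my_dev_phys_pfn(dev, vmf->pgoff);

	/*
	 * No errno translation needed here: the vm_fault_t result
	 * (VM_FAULT_NOPAGE, VM_FAULT_OOM, VM_FAULT_SIGBUS, ...) can be
	 * returned to the fault path as-is.
	 */
	return vmf_insert_pfn_prot(vma, vmf->address, pfn,
				   pgprot_noncached(vma->vm_page_prot));
}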
mm/memory.c

index 08653d0a795a3610eb7cf68e82f2195e264be7bb..40b692fa4b9946ea19ddd0b98fe19fc3aa08e013 100644
@@ -1572,36 +1572,6 @@ out:
        return retval;
 }
 
-static int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
-                       unsigned long pfn, pgprot_t pgprot)
-{
-       int ret;
-       /*
-        * Technically, architectures with pte_special can avoid all these
-        * restrictions (same for remap_pfn_range).  However we would like
-        * consistency in testing and feature parity among all, so we should
-        * try to keep these invariants in place for everybody.
-        */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
-       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
-                                               (VM_PFNMAP|VM_MIXEDMAP));
-       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
-
-       if (addr < vma->vm_start || addr >= vma->vm_end)
-               return -EFAULT;
-
-       if (!pfn_modify_allowed(pfn, pgprot))
-               return -EACCES;
-
-       track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
-
-       ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
-                       false);
-
-       return ret;
-}
-
 /**
  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
  * @vma: user vma to map to
@@ -1623,7 +1593,30 @@ static int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot)
 {
-       int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
+       int err;
+
+       /*
+        * Technically, architectures with pte_special can avoid all these
+        * restrictions (same for remap_pfn_range).  However we would like
+        * consistency in testing and feature parity among all, so we should
+        * try to keep these invariants in place for everybody.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
+       if (!pfn_modify_allowed(pfn, pgprot))
+               return VM_FAULT_SIGBUS;
+
+       track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
+
+       err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+                       false);
 
        if (err == -ENOMEM)
                return VM_FAULT_OOM;