Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm...
index 99f97d200aa4da93e0f394468222650b1d46765f..bc88d16743641263c02129afadc8cef31671b618 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -93,6 +93,12 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ *                                                             r: (no) no
+ *                                                             w: (no) no
+ *                                                             x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
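
The added comment documents execute-only memory on arm64 with Enhanced PAN: a
PROT_EXEC-only request no longer implies read permission for either mapping
type. Below is a minimal userspace sketch of such a request; on hardware or
kernels without EPAN the mapping may still end up readable, exactly as the
(no)/(yes) table above describes.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;

        /* Ask for execute-only; with EPAN this is truly no-read/no-write. */
        void *p = mmap(NULL, len, PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("execute-only mapping at %p\n", p);

        /* Dereferencing p here would fault on an EPAN-capable arm64 CPU. */
        munmap(p, len);
        return 0;
}
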
@@ -606,7 +612,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
        unsigned long nr_pages = 0;
        struct vm_area_struct *vma;
 
-       /* Find first overlaping mapping */
+       /* Find first overlapping mapping */
        vma = find_vma_intersection(mm, addr, end);
        if (!vma)
                return 0;
@@ -2869,7 +2875,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        if (unlikely(uf)) {
                /*
                 * If userfaultfd_unmap_prep returns an error the vmas
-                * will remain splitted, but userland will get a
+                * will remain split, but userland will get a
                 * highly unexpected error anyway. This is no
                 * different than the case where the first of the two
                 * __split_vma fails, but we don't undo the first
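
Background on why __do_munmap() involves userfaultfd at all: when a range
registered with UFFD_FEATURE_EVENT_UNMAP is unmapped, userfaultfd_unmap_prep()
arranges for a UFFD_EVENT_UNMAP message before the boundary VMAs are split.
A minimal userspace sketch of that notification path follows, assuming 4 KiB
pages and a system that permits this process to call userfaultfd() (see the
vm.unprivileged_userfaultfd sysctl); the monitor thread is needed because the
unmapping thread blocks until the event has been read (build with -lpthread).

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Monitor thread: block until the kernel queues the unmap notification. */
static void *monitor(void *arg)
{
        int uffd = *(int *)arg;
        struct uffd_msg msg;

        if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
            msg.event == UFFD_EVENT_UNMAP)
                printf("UFFD_EVENT_UNMAP: [0x%llx, 0x%llx)\n",
                       (unsigned long long)msg.arg.remove.start,
                       (unsigned long long)msg.arg.remove.end);
        return NULL;
}

int main(void)
{
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
        size_t page = 4096, len = 2 * page;
        pthread_t t;

        if (uffd < 0) {
                perror("userfaultfd");
                return 1;
        }

        struct uffdio_api api = { .api = UFFD_API,
                                  .features = UFFD_FEATURE_EVENT_UNMAP };
        if (ioctl(uffd, UFFDIO_API, &api)) {
                perror("UFFDIO_API");
                return 1;
        }

        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        struct uffdio_register reg = {
                .range = { .start = (unsigned long)p, .len = len },
                .mode = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) {
                perror("UFFDIO_REGISTER");
                return 1;
        }

        pthread_create(&t, NULL, monitor, &uffd);

        /* Unmapping only the first page makes __do_munmap() split the VMA
         * and deliver an unmap event covering exactly that page. */
        munmap(p, page);

        pthread_join(t, NULL);
        close(uffd);
        return 0;
}
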
@@ -3023,25 +3029,9 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 
        flags &= MAP_NONBLOCK;
        flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
-       if (vma->vm_flags & VM_LOCKED) {
-               struct vm_area_struct *tmp;
+       if (vma->vm_flags & VM_LOCKED)
                flags |= MAP_LOCKED;
 
-               /* drop PG_Mlocked flag for over-mapped range */
-               for (tmp = vma; tmp->vm_start >= start + size;
-                               tmp = tmp->vm_next) {
-                       /*
-                        * Split pmd and munlock page on the border
-                        * of the range.
-                        */
-                       vma_adjust_trans_huge(tmp, start, start + size, 0);
-
-                       munlock_vma_pages_range(tmp,
-                                       max(tmp->vm_start, start),
-                                       min(tmp->vm_end, start + size));
-               }
-       }
-
        file = get_file(vma->vm_file);
        ret = do_mmap(vma->vm_file, start, size,
                        prot, flags, pgoff, &populate, NULL);
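
The deleted loop munlocked the over-mapped pages by hand; after this change
remap_file_pages() only forwards VM_LOCKED as MAP_LOCKED and lets do_mmap()
deal with the old range. For reference, a minimal userspace sketch of the
(deprecated) remap_file_pages() call that this emulation serves, using a
shared anonymous mapping so the shmem backing supplies the vm_file the
syscall insists on.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        size_t len = 4 * psz;

        /* remap_file_pages() only works on shared mappings. */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(buf, 'A', psz);          /* backing page 0 */
        memset(buf + psz, 'B', psz);    /* backing page 1 */

        /* Make the first page of the window show backing page 1. */
        if (remap_file_pages(buf, psz, 0, 1, 0)) {
                perror("remap_file_pages");
                return 1;
        }
        printf("first byte is now '%c'\n", buf[0]);     /* prints 'B' */

        munmap(buf, len);
        return 0;
}
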
@@ -3403,14 +3393,10 @@ static const char *special_mapping_name(struct vm_area_struct *vma)
        return ((struct vm_special_mapping *)vma->vm_private_data)->name;
 }
 
-static int special_mapping_mremap(struct vm_area_struct *new_vma,
-                                 unsigned long flags)
+static int special_mapping_mremap(struct vm_area_struct *new_vma)
 {
        struct vm_special_mapping *sm = new_vma->vm_private_data;
 
-       if (flags & MREMAP_DONTUNMAP)
-               return -EINVAL;
-
        if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
                return -EFAULT;
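
With the flags argument gone, the special-mapping ->mremap() hook no longer
sees the mremap flags and so no longer rejects MREMAP_DONTUNMAP on its own.
For reference, a minimal userspace sketch of what MREMAP_DONTUNMAP does (move
the pages to a new address while leaving the old range mapped but empty); it
needs Linux 5.7+ together with MREMAP_MAYMOVE, and the fallback define below
is only for older libc headers.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4      /* not yet in every libc's <sys/mman.h> */
#endif

int main(void)
{
        size_t len = 4096;

        char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (old == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(old, "hello");

        /* Pages move to 'new'; 'old' stays mapped, now backed by nothing. */
        char *new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
        if (new == MAP_FAILED) {
                perror("mremap");
                return 1;
        }
        printf("new says \"%s\", old says \"%s\"\n", new, old);

        munmap(new, len);
        munmap(old, len);
        return 0;
}
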