binder: Don't modify VMA bounds in ->mmap handler
author Jann Horn <jannh@google.com>
Wed, 16 Oct 2019 15:01:18 +0000 (17:01 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 17 Oct 2019 12:58:44 +0000 (05:58 -0700)
binder_mmap() tries to prevent the creation of overly big binder mappings
by silently truncating the size of the VMA to 4MiB. However, this violates
the API contract of mmap(). If userspace attempts to create a large binder
VMA, and later attempts to unmap that VMA, it will call munmap() on a range
beyond the end of the VMA, which may have been allocated to another VMA in
the meantime. This can lead to userspace memory corruption.

The following sequence of calls leads to a segfault without this commit:

#include <err.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void) {
  int binder_fd = open("/dev/binder", O_RDWR);
  if (binder_fd == -1) err(1, "open binder");
  /* request an 8MiB mapping; the kernel silently shrinks the VMA to 4MiB */
  void *binder_mapping = mmap(NULL, 0x800000UL, PROT_READ, MAP_SHARED,
                              binder_fd, 0);
  if (binder_mapping == MAP_FAILED) err(1, "mmap binder");
  /* this anonymous mapping can land in the truncated-away upper 4MiB */
  void *data_mapping = mmap(NULL, 0x400000UL, PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (data_mapping == MAP_FAILED) err(1, "mmap data");
  /* unmapping the full 8MiB range also tears down data_mapping ... */
  munmap(binder_mapping, 0x800000UL);
  /* ... so this write faults */
  *(char*)data_mapping = 1;
  return 0;
}
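
For illustration (not part of the original commit message), the "silent" part of
the truncation can be observed directly: after the first mmap() above,
/proc/self/maps already shows the binder VMA as 4MiB wide even though the 8MiB
mmap() succeeded. A minimal sketch of such a check, using a hypothetical
dump_vma() helper:

#include <stdio.h>

/* Hypothetical helper: print the /proc/self/maps line whose start
 * address matches addr, i.e. the kernel's view of that mapping. On a
 * pre-fix kernel, dump_vma(binder_mapping) prints a 4MiB-wide range
 * even though 8MiB was requested. */
static void dump_vma(void *addr) {
  char line[256];
  FILE *maps = fopen("/proc/self/maps", "r");
  if (!maps) return;
  while (fgets(line, sizeof(line), maps)) {
    unsigned long start;
    if (sscanf(line, "%lx-", &start) == 1 && start == (unsigned long)addr)
      fputs(line, stdout);
  }
  fclose(maps);
}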

Cc: stable@vger.kernel.org
Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Todd Kjos <tkjos@google.com>
Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
Link: https://lore.kernel.org/r/20191016150119.154756-1-jannh@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder.c
drivers/android/binder_alloc.c

index 5b9ac2122e894558eea8e67353f981438283a2be..265d9dd46a5ebb6610bf826ed0fa620b55c289a6 100644
@@ -97,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K                               0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
 
 enum {
@@ -5177,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        if (proc->tsk != current->group_leader)
                return -EINVAL;
 
-       if ((vma->vm_end - vma->vm_start) > SZ_4M)
-               vma->vm_end = vma->vm_start + SZ_4M;
-
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                     __func__, proc->pid, vma->vm_start, vma->vm_end,
index d42a8b2f636a5bbdcd43bcfee43b24d820e1e745..eb76a823fbb200f008b0e4c6ad02bc5818929857 100644
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        alloc->buffer = (void __user *)vma->vm_start;
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+       alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+                                  SZ_4M);
+       alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
-       alloc->buffer_size = vma->vm_end - vma->vm_start;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
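
Taken together, the two hunks move the 4MiB cap out of the VMA and into the
allocator's own bookkeeping. In isolation, the new sizing logic reduces to the
following (a simplified sketch, not the literal kernel source):

/* The VMA keeps whatever size userspace requested; only the binder
 * allocator's usable buffer size is clamped to 4MiB. */
alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, SZ_4M);
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                       sizeof(alloc->pages[0]), GFP_KERNEL);

Because buffer_size is now assigned before the pages array is sized, the
kcalloc() is bounded by the clamped value as well, and SZ_4M comes from
<linux/sizes.h> rather than the local fallback definition removed from
binder.c.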