Merge tag 'staging-4.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d5d33e12e9529288876dd56950f751116cf6dd98..a880b5c6c6c321c8d4e10de3f0298fc90fb2e46c 100644
@@ -178,7 +178,7 @@ static int range_alloc(struct ashmem_area *asma,
        struct ashmem_range *range;
 
        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
-       if (unlikely(!range))
+       if (!range)
                return -ENOMEM;
 
        range->asma = asma;
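
Most hunks in this diff make the same mechanical change: dropping unlikely()
from error-path checks. unlikely() is only a branch-prediction hint; it
expands to __builtin_expect(), roughly as follows (simplified from
include/linux/compiler.h, ignoring the instrumented variants):

    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

On a one-shot error path such as a failed allocation the hint buys nothing
measurable, so removing it is a pure readability win.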
@@ -246,11 +246,11 @@ static int ashmem_open(struct inode *inode, struct file *file)
        int ret;
 
        ret = generic_file_open(inode, file);
-       if (unlikely(ret))
+       if (ret)
                return ret;
 
        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
-       if (unlikely(!asma))
+       if (!asma)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&asma->unpinned_list);
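
ashmem_open() hands out a zero-filled per-descriptor ashmem_area from the
slab cache created in ashmem_init() further down. The general slab pattern,
as a sketch with hypothetical names (example_cache, struct example):

    struct kmem_cache *cachep;
    struct example *obj;

    cachep = kmem_cache_create("example_cache", sizeof(struct example),
                               0, 0, NULL);
    obj = kmem_cache_zalloc(cachep, GFP_KERNEL);   /* allocated zeroed */
    /* ... use obj ... */
    kmem_cache_free(cachep, obj);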
@@ -361,14 +361,20 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
        mutex_lock(&ashmem_mutex);
 
        /* user needs to SET_SIZE before mapping */
-       if (unlikely(!asma->size)) {
+       if (!asma->size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* requested mapping size larger than object size */
+       if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
                ret = -EINVAL;
                goto out;
        }
 
        /* requested protection bits must match our allowed protection mask */
-       if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
-                    calc_vm_prot_bits(PROT_MASK, 0))) {
+       if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+           calc_vm_prot_bits(PROT_MASK, 0)) {
                ret = -EPERM;
                goto out;
        }
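
Beyond the unlikely() cleanup, this hunk adds a real check: a mapping
request larger than the page-aligned object size is now refused. A minimal
userspace sketch (error handling elided; assumes the /dev/ashmem node and
the ashmem uapi header, shipped as <linux/ashmem.h> on Android):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/ashmem.h>

    int fd = open("/dev/ashmem", O_RDWR);
    ioctl(fd, ASHMEM_SET_SIZE, 4096);            /* object is one page */

    /* 8192 > PAGE_ALIGN(4096): mmap() now fails with EINVAL */
    void *p = mmap(NULL, 8192, PROT_READ, MAP_SHARED, fd, 0);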
@@ -446,9 +452,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-               vfs_fallocate(range->asma->file,
-                             FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-                             start, end - start);
+               range->asma->file->f_op->fallocate(range->asma->file,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                               start, end - start);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);
 
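
The shrinker now invokes the backing shmem file's ->fallocate() method
directly rather than going through vfs_fallocate(). The VFS wrapper adds
work that the reclaim path does not want, roughly (simplified from
fs/open.c; details elided):

    int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
    {
            int ret;

            /* ... argument/mode validation, security_file_permission() ... */
            file_start_write(file);    /* blocks while the fs is frozen */
            ret = file->f_op->fallocate(file, mode, offset, len);
            file_end_write(file);
            return ret;
    }

Presumably the point is to skip those checks and the freeze protection when
purging from a shrinker. The mode used here, FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_KEEP_SIZE, frees the pages backing the range while leaving the
file size unchanged: exactly ashmem's "purge the contents, keep the object"
semantic.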
@@ -488,7 +494,7 @@ static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
        mutex_lock(&ashmem_mutex);
 
        /* the user can only remove, not add, protection bits */
-       if (unlikely((asma->prot_mask & prot) != prot)) {
+       if ((asma->prot_mask & prot) != prot) {
                ret = -EINVAL;
                goto out;
        }
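
The invariant enforced here: a caller may narrow the allowed protection
mask but never widen it back. From userspace (sketch, reusing an ashmem fd
as in the mmap example above):

    ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);               /* ok: narrows */
    ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ | PROT_WRITE);  /* EINVAL: widens */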
@@ -526,7 +532,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
                local_name[ASHMEM_NAME_LEN - 1] = '\0';
        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
-       if (unlikely(asma->file))
+       if (asma->file)
                ret = -EINVAL;
        else
                strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
@@ -565,7 +571,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
         * Now we are just copying from the stack variable to userland
         * No lock held
         */
-       if (unlikely(copy_to_user(name, local_name, len)))
+       if (copy_to_user(name, local_name, len))
                ret = -EFAULT;
        return ret;
 }
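
set_name() and get_name() share a discipline worth noting: user memory is
only touched while ashmem_mutex is *not* held, because copy_from_user() and
copy_to_user() can fault. A fault takes mmap_sem, and ashmem_mmap() already
nests ashmem_mutex inside mmap_sem, so faulting with the mutex held could
deadlock. The shape of the pattern (condensed sketch, not the exact code):

    mutex_lock(&ashmem_mutex);
    memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); /* snapshot */
    mutex_unlock(&ashmem_mutex);

    if (copy_to_user(name, local_name, len))   /* may fault: lock dropped */
            ret = -EFAULT;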
@@ -703,25 +709,25 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
        size_t pgstart, pgend;
        int ret = -EINVAL;
 
-       if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+       if (copy_from_user(&pin, p, sizeof(pin)))
                return -EFAULT;
 
        mutex_lock(&ashmem_mutex);
 
-       if (unlikely(!asma->file))
+       if (!asma->file)
                goto out_unlock;
 
        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;
 
-       if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+       if ((pin.offset | pin.len) & ~PAGE_MASK)
                goto out_unlock;
 
-       if (unlikely(((__u32)-1) - pin.offset < pin.len))
+       if (((__u32)-1) - pin.offset < pin.len)
                goto out_unlock;
 
-       if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+       if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
                goto out_unlock;
 
        pgstart = pin.offset / PAGE_SIZE;
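
The middle check above is an overflow-safe bounds test: instead of
computing pin.offset + pin.len, which could wrap a __u32 and slip past the
size check, it compares pin.len against the headroom remaining above
pin.offset. Worked example:

    /* offset = 0xfffff000, len = 0x2000:
     * headroom = 0xffffffff - 0xfffff000 = 0xfff, and 0xfff < 0x2000,
     * so the request is rejected -- even though offset + len would have
     * wrapped to the innocuous-looking value 0x1000. */
    if (((__u32)-1) - pin.offset < pin.len)
            goto out_unlock;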
@@ -858,7 +864,7 @@ static int __init ashmem_init(void)
        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
-       if (unlikely(!ashmem_area_cachep)) {
+       if (!ashmem_area_cachep) {
                pr_err("failed to create slab cache\n");
                goto out;
        }
@@ -866,13 +872,13 @@ static int __init ashmem_init(void)
        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
-       if (unlikely(!ashmem_range_cachep)) {
+       if (!ashmem_range_cachep) {
                pr_err("failed to create slab cache\n");
                goto out_free1;
        }
 
        ret = misc_register(&ashmem_misc);
-       if (unlikely(ret)) {
+       if (ret) {
                pr_err("failed to register misc device!\n");
                goto out_free2;
        }
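
ashmem_init() uses the kernel's usual goto-unwind ladder: each later
failure jumps to a label that tears down only what has already succeeded,
in reverse order (out_free2 destroying the range cache, out_free1 the area
cache). A minimal sketch of the shape, with hypothetical step_a()/step_b():

    static int __init example_init(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    goto out;
            ret = step_b();
            if (ret)
                    goto out_undo_a;
            return 0;

    out_undo_a:
            undo_a();
    out:
            return ret;
    }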