staging: erofs: unzip_vle_lz4.c,utils.c: rectify BUG_ONs
author		Gao Xiang <gaoxiang25@huawei.com>
		Tue, 11 Dec 2018 07:17:50 +0000 (15:17 +0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Wed, 12 Dec 2018 09:56:34 +0000 (10:56 +0100)
Remove all redundant BUG_ONs, and turn the remaining useful ones
into DBG_BUGONs.
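
For context, DBG_BUGON only acts as a hard assertion when erofs debugging is
enabled and compiles to a no-op otherwise, which is what makes this conversion
safe for production builds. A minimal sketch of that pattern follows; the
exact definition lives in drivers/staging/erofs/internal.h and is assumed
here rather than quoted from this patch:

	/*
	 * Sketch of the assumed DBG_BUGON pattern: BUG_ON in debug
	 * builds, otherwise evaluate the condition and discard it.
	 */
	#ifdef CONFIG_EROFS_FS_DEBUG
	#define DBG_BUGON(x)	BUG_ON(x)
	#else
	#define DBG_BUGON(x)	((void)(x))
	#endif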

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/erofs/unzip_vle_lz4.c
drivers/staging/erofs/utils.c

index de0a5d1365a406c27f52c4de240da59518d59501..52797bd89da18363c73b78af0589cd22e76a73ba 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -79,7 +79,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
                        if (compressed_pages[j] != page)
                                continue;
 
-                       BUG_ON(mirrored[j]);
+                       DBG_BUGON(mirrored[j]);
                        memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
                        mirrored[j] = true;
                        break;
index d2e3ace9104693e2dabcd7eb5df50a0d230c3501..b535898ca753f72bcaad1af54cee34b375370e81 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
                list_del(&page->lru);
        } else {
                page = alloc_pages(gfp | __GFP_NOFAIL, 0);
-
-               BUG_ON(page == NULL);
-               BUG_ON(page->mapping != NULL);
        }
        return page;
 }
@@ -58,7 +55,7 @@ repeat:
                /* decrease refcount added by erofs_workgroup_put */
                if (unlikely(oldcount == 1))
                        atomic_long_dec(&erofs_global_shrink_cnt);
-               BUG_ON(index != grp->index);
+               DBG_BUGON(index != grp->index);
        }
        rcu_read_unlock();
        return grp;
@@ -71,8 +68,11 @@ int erofs_register_workgroup(struct super_block *sb,
        struct erofs_sb_info *sbi;
        int err;
 
-       /* grp->refcount should not < 1 */
-       BUG_ON(!atomic_read(&grp->refcount));
+       /* grp shouldn't be broken or used before */
+       if (unlikely(atomic_read(&grp->refcount) != 1)) {
+               DBG_BUGON(1);
+               return -EINVAL;
+       }
 
        err = radix_tree_preload(GFP_NOFS);
        if (err)