erofs: get rid of `justfound` debugging tag
authorGao Xiang <hsiangkao@linux.alibaba.com>
Tue, 5 Mar 2024 09:14:45 +0000 (17:14 +0800)
committerGao Xiang <hsiangkao@linux.alibaba.com>
Sun, 10 Mar 2024 10:40:49 +0000 (18:40 +0800)
`justfound` was introduced to identify cached folios that have just been
added to compressed bvecs so that more checks can be applied in the I/O
submission path.

EROFS is now quite stable compared to the codebase at that stage.
`justfound` has become a burden for upcoming features.  Drop it.

Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240305091448.1384242-3-hsiangkao@linux.alibaba.com
fs/erofs/zdata.c

index c250746577086066603c727462456d364af20090..75b05990b571db4c186fedb1f8fcb2794a45e3d0 100644 (file)
@@ -565,17 +565,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
        for (i = 0; i < pclusterpages; ++i) {
                struct page *page, *newpage;
-               void *t;        /* mark pages just found for debugging */
 
                /* Inaccurate check w/o locking to avoid unneeded lookups */
                if (READ_ONCE(pcl->compressed_bvecs[i].page))
                        continue;
 
                page = find_get_page(mc, pcl->obj.index + i);
-               if (page) {
-                       t = (void *)((unsigned long)page | 1);
-                       newpage = NULL;
-               } else {
+               if (!page) {
                        /* I/O is needed, no possible to decompress directly */
                        standalone = false;
                        if (!shouldalloc)
@@ -589,11 +585,10 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
                        if (!newpage)
                                continue;
                        set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
-                       t = (void *)((unsigned long)newpage | 1);
                }
                spin_lock(&pcl->obj.lockref.lock);
                if (!pcl->compressed_bvecs[i].page) {
-                       pcl->compressed_bvecs[i].page = t;
+                       pcl->compressed_bvecs[i].page = page ? page : newpage;
                        spin_unlock(&pcl->obj.lockref.lock);
                        continue;
                }
@@ -1423,7 +1418,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
        struct z_erofs_bvec zbv;
        struct address_space *mapping;
        struct page *page;
-       int justfound, bs = i_blocksize(f->inode);
+       int bs = i_blocksize(f->inode);
 
        /* Except for inplace pages, the entire page can be used for I/Os */
        bvec->bv_offset = 0;
@@ -1432,9 +1427,6 @@ repeat:
        spin_lock(&pcl->obj.lockref.lock);
        zbv = pcl->compressed_bvecs[nr];
        page = zbv.page;
-       justfound = (unsigned long)page & 1UL;
-       page = (struct page *)((unsigned long)page & ~1UL);
-       pcl->compressed_bvecs[nr].page = page;
        spin_unlock(&pcl->obj.lockref.lock);
        if (!page)
                goto out_allocpage;
@@ -1465,9 +1457,6 @@ repeat:
        }
 
        lock_page(page);
-       /* only true if page reclaim goes wrong, should never happen */
-       DBG_BUGON(justfound && PagePrivate(page));
-
        /* the cached page is still in managed cache */
        if (page->mapping == mc) {
                /*
@@ -1475,7 +1464,6 @@ repeat:
                 * `->private` pcluster hint.  Let's reconnect them.
                 */
                if (!PagePrivate(page)) {
-                       DBG_BUGON(!justfound);
                        /* compressed_bvecs[] already takes a ref */
                        attach_page_private(page, pcl);
                        put_page(page);
@@ -1494,8 +1482,6 @@ repeat:
         * allocate a new page for compressed data.
         */
        DBG_BUGON(page->mapping);
-       DBG_BUGON(!justfound);
-
        tocache = true;
        unlock_page(page);
        put_page(page);