f2fs: convert f2fs_read_multi_pages() to use folio
author: Chao Yu <chao@kernel.org>
Wed, 14 Aug 2024 13:50:59 +0000 (21:50 +0800)
committer: Jaegeuk Kim <jaegeuk@kernel.org>
Fri, 6 Sep 2024 23:04:47 +0000 (23:04 +0000)
Convert to use folio, so that we can get rid of 'page->index' to
prepare for removal of 'index' field in structure page [1].

[1] https://lore.kernel.org/all/Zp8fgUSIBGQ1TN0D@casper.infradead.org/

Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
fs/f2fs/data.c

index 0779e222f709be7e1fcfc3bdf015fbbde885e957..4f4e76c336117f5c819d1998c51f281a60d8e498 100644 (file)
@@ -2207,19 +2207,22 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        /* get rid of pages beyond EOF */
        for (i = 0; i < cc->cluster_size; i++) {
                struct page *page = cc->rpages[i];
+               struct folio *folio;
 
                if (!page)
                        continue;
-               if ((sector_t)page->index >= last_block_in_file) {
-                       zero_user_segment(page, 0, PAGE_SIZE);
-                       if (!PageUptodate(page))
-                               SetPageUptodate(page);
-               } else if (!PageUptodate(page)) {
+
+               folio = page_folio(page);
+               if ((sector_t)folio->index >= last_block_in_file) {
+                       folio_zero_segment(folio, 0, folio_size(folio));
+                       if (!folio_test_uptodate(folio))
+                               folio_mark_uptodate(folio);
+               } else if (!folio_test_uptodate(folio)) {
                        continue;
                }
-               unlock_page(page);
+               folio_unlock(folio);
                if (for_write)
-                       put_page(page);
+                       folio_put(folio);
                cc->rpages[i] = NULL;
                cc->nr_rpages--;
        }
@@ -2279,7 +2282,7 @@ skip_reading_dnode:
        }
 
        for (i = 0; i < cc->nr_cpages; i++) {
-               struct page *page = dic->cpages[i];
+               struct folio *folio = page_folio(dic->cpages[i]);
                block_t blkaddr;
                struct bio_post_read_ctx *ctx;
 
@@ -2289,7 +2292,8 @@ skip_reading_dnode:
 
                f2fs_wait_on_block_writeback(inode, blkaddr);
 
-               if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
+               if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
+                                                               blkaddr)) {
                        if (atomic_dec_and_test(&dic->remaining_pages)) {
                                f2fs_decompress_cluster(dic, true);
                                break;
@@ -2299,7 +2303,7 @@ skip_reading_dnode:
 
                if (bio && (!page_is_mergeable(sbi, bio,
                                        *last_block_in_bio, blkaddr) ||
-                   !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
+                   !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
 submit_and_realloc:
                        f2fs_submit_read_bio(sbi, bio, DATA);
                        bio = NULL;
@@ -2308,7 +2312,7 @@ submit_and_realloc:
                if (!bio) {
                        bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
                                        f2fs_ra_op_flags(rac),
-                                       page->index, for_write);
+                                       folio->index, for_write);
                        if (IS_ERR(bio)) {
                                ret = PTR_ERR(bio);
                                f2fs_decompress_end_io(dic, ret, true);
@@ -2318,7 +2322,7 @@ submit_and_realloc:
                        }
                }
 
-               if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+               if (!bio_add_folio(bio, folio, blocksize, 0))
                        goto submit_and_realloc;
 
                ctx = get_post_read_ctx(bio);