btrfs: convert find_next_dirty_byte() to take a folio
author Josef Bacik <josef@toxicpanda.com>
Thu, 25 Jul 2024 00:17:26 +0000 (20:17 -0400)
committer David Sterba <dsterba@suse.com>
Tue, 10 Sep 2024 14:51:16 +0000 (16:51 +0200)
We already use a folio in parts of this function, so replace all page
usage with the folio and update the function to take the folio as an
argument.

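A minimal sketch, not part of the patch, of the folio helpers the
conversion relies on: folio_pos() and folio_size() replace
page_offset() and the fixed PAGE_SIZE, and offset_in_folio() replaces
offset_in_page(). The helper name below is illustrative only:

	/* Illustrative only: folio equivalents of the old page-based range math. */
	static void sketch_full_folio_range(struct folio *folio, u64 *start, u64 *end)
	{
		/* folio_pos() replaces page_offset(). */
		*start = folio_pos(folio);
		/* folio_size() replaces PAGE_SIZE, so large folios work too. */
		*end = folio_pos(folio) + folio_size(folio);
	}
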
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent_io.c

index 782a38370d0383dc0b33d4d2e22db7b653dd0878..e56b62746a15f16cfd70280cb0aa034607744a1a 100644
@@ -1348,9 +1348,8 @@ out:
  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
  */
 static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
-                                struct page *page, u64 *start, u64 *end)
+                                struct folio *folio, u64 *start, u64 *end)
 {
-       struct folio *folio = page_folio(page);
        struct btrfs_subpage *subpage = folio_get_private(folio);
        struct btrfs_subpage_info *spi = fs_info->subpage_info;
        u64 orig_start = *start;
@@ -1363,14 +1362,15 @@ static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
         * For regular sector size == page size case, since one page only
         * contains one sector, we return the page offset directly.
         */
-       if (!btrfs_is_subpage(fs_info, page->mapping)) {
-               *start = page_offset(page);
-               *end = page_offset(page) + PAGE_SIZE;
+       if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+               *start = folio_pos(folio);
+               *end = folio_pos(folio) + folio_size(folio);
                return;
        }
 
        range_start_bit = spi->dirty_offset +
-                         (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
+                         (offset_in_folio(folio, orig_start) >>
+                          fs_info->sectorsize_bits);
 
        /* We should have the page locked, but just in case */
        spin_lock_irqsave(&subpage->lock, flags);
@@ -1381,8 +1381,8 @@ static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
        range_start_bit -= spi->dirty_offset;
        range_end_bit -= spi->dirty_offset;
 
-       *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
-       *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
+       *start = folio_pos(folio) + range_start_bit * fs_info->sectorsize;
+       *end = folio_pos(folio) + range_end_bit * fs_info->sectorsize;
 }
 
 /*
@@ -1443,7 +1443,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                        break;
                }
 
-               find_next_dirty_byte(fs_info, &folio->page, &dirty_range_start,
+               find_next_dirty_byte(fs_info, folio, &dirty_range_start,
                                     &dirty_range_end);
                if (cur < dirty_range_start) {
                        cur = dirty_range_start;