btrfs: prepare btrfs_page_mkwrite() for large folios
authorQu Wenruo <wqu@suse.com>
Thu, 20 Feb 2025 09:22:26 +0000 (19:52 +1030)
committerDavid Sterba <dsterba@suse.com>
Tue, 18 Mar 2025 19:35:53 +0000 (20:35 +0100)
This removes the assumption that the folio is always page-sized.
(Although the ASSERT() for folio order is still kept as-is).

Just replace PAGE_SIZE with folio_size().

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/file.c

index 643f101c73403735659dadb657478c2e232f9467..262a707d899064aeddb9aa4e5ba398b990da9aa3 100644 (file)
@@ -1782,6 +1782,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
        struct extent_changeset *data_reserved = NULL;
        unsigned long zero_start;
        loff_t size;
+       size_t fsize = folio_size(folio);
        vm_fault_t ret;
        int ret2;
        int reserved = 0;
@@ -1792,7 +1793,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 
        ASSERT(folio_order(folio) == 0);
 
-       reserved_space = PAGE_SIZE;
+       reserved_space = fsize;
 
        sb_start_pagefault(inode->i_sb);
        page_start = folio_pos(folio);
@@ -1846,7 +1847,7 @@ again:
         * We can't set the delalloc bits if there are pending ordered
         * extents.  Drop our locks and wait for them to finish.
         */
-       ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
+       ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize);
        if (ordered) {
                unlock_extent(io_tree, page_start, page_end, &cached_state);
                folio_unlock(folio);
@@ -1858,11 +1859,11 @@ again:
 
        if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
                reserved_space = round_up(size - page_start, fs_info->sectorsize);
-               if (reserved_space < PAGE_SIZE) {
+               if (reserved_space < fsize) {
                        end = page_start + reserved_space - 1;
                        btrfs_delalloc_release_space(BTRFS_I(inode),
                                        data_reserved, page_start,
-                                       PAGE_SIZE - reserved_space, true);
+                                       fsize - reserved_space, true);
                }
        }
 
@@ -1889,12 +1890,12 @@ again:
        if (page_start + folio_size(folio) > size)
                zero_start = offset_in_folio(folio, size);
        else
-               zero_start = PAGE_SIZE;
+               zero_start = fsize;
 
-       if (zero_start != PAGE_SIZE)
+       if (zero_start != fsize)
                folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
 
-       btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+       btrfs_folio_clear_checked(fs_info, folio, page_start, fsize);
        btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
        btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
 
@@ -1903,7 +1904,7 @@ again:
        unlock_extent(io_tree, page_start, page_end, &cached_state);
        up_read(&BTRFS_I(inode)->i_mmap_lock);
 
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
        sb_end_pagefault(inode->i_sb);
        extent_changeset_free(data_reserved);
        return VM_FAULT_LOCKED;
@@ -1912,7 +1913,7 @@ out_unlock:
        folio_unlock(folio);
        up_read(&BTRFS_I(inode)->i_mmap_lock);
 out:
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
        btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
                                     reserved_space, (ret != 0));
 out_noreserve: