btrfs: drop argument tree from btrfs_lock_and_flush_ordered_range
authorDavid Sterba <dsterba@suse.com>
Wed, 5 Feb 2020 18:09:33 +0000 (19:09 +0100)
committerDavid Sterba <dsterba@suse.com>
Mon, 23 Mar 2020 16:01:34 +0000 (17:01 +0100)
The tree pointer can be safely read from the inode, so we can drop the
redundant 'tree' argument from btrfs_lock_and_flush_ordered_range.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h

index 7a2f657f5dbdcad4f9998252dd9bbce579a72378..35accc9b8ced5d8c579a9f9408bb74b3aaeb12a0 100644 (file)
@@ -3331,7 +3331,7 @@ static inline void contiguous_readpages(struct extent_io_tree *tree,
 
        ASSERT(tree == &inode->io_tree);
 
-       btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
+       btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
@@ -3354,7 +3354,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
        ASSERT(tree == &inode->io_tree);
 
-       btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
+       btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 
        ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
                            bio_flags, read_flags, NULL);
index 3f7f5323aefa3b7d6de0cc82c297c727b7f76f0f..fd52ad00b6c8f58d031eddc928086865b8aa98d2 100644 (file)
@@ -1561,7 +1561,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
        lockend = round_up(pos + *write_bytes,
                           fs_info->sectorsize) - 1;
 
-       btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
+       btrfs_lock_and_flush_ordered_range(inode, lockstart,
                                           lockend, NULL);
 
        num_bytes = lockend - lockstart + 1;
index 945f8e2043e1f7cc6a6986c2fd71c15891cd94b9..79a2020a805f575ebb60727563d71127557fc371 100644 (file)
@@ -4619,7 +4619,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        if (size <= hole_start)
                return 0;
 
-       btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
+       btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
                                           block_end - 1, &cached_state);
        cur_offset = hole_start;
        while (1) {
index f47accad6f052c5b6492da8f5f3fa2760ee16af9..e13b3d28c063b4f3bf015dc8c5a2a4abcdf044bd 100644 (file)
@@ -835,7 +835,6 @@ out:
  * btrfs_flush_ordered_range - Lock the passed range and ensures all pending
  * ordered extents in it are run to completion.
  *
- * @tree:         IO tree used for locking out other users of the range
  * @inode:        Inode whose ordered tree is to be searched
  * @start:        Beginning of range to flush
  * @end:          Last byte of range to lock
@@ -845,8 +844,7 @@ out:
  * This function always returns with the given range locked, ensuring after it's
  * called no order extent can be pending.
  */
-void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
-                                       struct btrfs_inode *inode, u64 start,
+void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
 {
@@ -854,13 +852,11 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;
 
-       ASSERT(tree == &inode->io_tree);
-
        if (cached_state)
                cachedp = cached_state;
 
        while (1) {
-               lock_extent_bits(tree, start, end, cachedp);
+               lock_extent_bits(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
@@ -873,7 +869,7 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
                                refcount_dec(&cache->refs);
                        break;
                }
-               unlock_extent_cached(tree, start, end, cachedp);
+               unlock_extent_cached(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
index a46f319d9ae0c964144c876051a2e364fa27d68e..c01c9698250b9a0c32dda9f4343c8cdca92a3a42 100644 (file)
@@ -183,8 +183,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len);
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len);
-void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
-                                       struct btrfs_inode *inode, u64 start,
+void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state);
 int __init ordered_data_init(void);