btrfs: add btrfs prefix to main lock, try lock and unlock extent functions
author     Filipe Manana <fdmanana@suse.com>
Mon, 31 Mar 2025 13:23:42 +0000 (14:23 +0100)
committer  David Sterba <dsterba@suse.com>
Thu, 15 May 2025 12:30:43 +0000 (14:30 +0200)
These functions are exported, so by convention they should have a 'btrfs_'
prefix, both to make it clear they are btrfs specific and to avoid
collisions with functions from elsewhere in the kernel. So add the prefix
to their names.
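
As an illustration, here is a minimal sketch of a typical call site before
and after the rename (variable names are only illustrative, following the
common pattern in the call sites changed below):

    struct extent_state *cached_state = NULL;

    /* Before: */
    lock_extent(&inode->io_tree, start, end, &cached_state);
    /* ... operate on the locked file range [start, end] ... */
    unlock_extent(&inode->io_tree, start, end, &cached_state);

    /* After: */
    btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
    /* ... operate on the locked file range [start, end] ... */
    btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);

The non-blocking variant follows the same pattern: try_lock_extent()
becomes btrfs_try_lock_extent(), still returning false when the range
cannot be locked without blocking.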

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
16 files changed:
fs/btrfs/compression.c
fs/btrfs/defrag.c
fs/btrfs/direct-io.c
fs/btrfs/extent-io-tree.h
fs/btrfs/extent_io.c
fs/btrfs/extent_map.c
fs/btrfs/fiemap.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/reflink.c
fs/btrfs/relocation.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tree-log.c

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 74edd2dd92ff62bc9a93f8ebb02ffd2c42dde8a7..8f3771451faa4809acdb4fb8dacb944cdfdc5a4c 100644
@@ -499,7 +499,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                }
 
                page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
-               lock_extent(tree, cur, page_end, NULL);
+               btrfs_lock_extent(tree, cur, page_end, NULL);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
                read_unlock(&em_tree->lock);
@@ -514,14 +514,14 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                    (extent_map_block_start(em) >> SECTOR_SHIFT) !=
                    orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
-                       unlock_extent(tree, cur, page_end, NULL);
+                       btrfs_unlock_extent(tree, cur, page_end, NULL);
                        folio_unlock(folio);
                        folio_put(folio);
                        break;
                }
                add_size = min(em->start + em->len, page_end + 1) - cur;
                free_extent_map(em);
-               unlock_extent(tree, cur, page_end, NULL);
+               btrfs_unlock_extent(tree, cur, page_end, NULL);
 
                if (folio_contains(folio, end_index)) {
                        size_t zero_offset = offset_in_folio(folio, isize);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index d302a67efcedb6d0f184429eccf7adf8489506fc..3e2e462365d6ff0565273f1515bae62fb51ca660 100644
@@ -776,10 +776,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
 
                /* Get the big lock and read metadata off disk. */
                if (!locked)
-                       lock_extent(io_tree, start, end, &cached);
+                       btrfs_lock_extent(io_tree, start, end, &cached);
                em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
                if (!locked)
-                       unlock_extent(io_tree, start, end, &cached);
+                       btrfs_unlock_extent(io_tree, start, end, &cached);
 
                if (IS_ERR(em))
                        return NULL;
@@ -891,10 +891,10 @@ again:
        while (1) {
                struct btrfs_ordered_extent *ordered;
 
-               lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
                ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
-               unlock_extent(&inode->io_tree, page_start, page_end,
-                             &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+                                   &cached_state);
                if (!ordered)
                        break;
 
@@ -1223,9 +1223,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
                folio_wait_writeback(folios[i]);
 
        /* Lock the pages range */
-       lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-                   (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-                   &cached_state);
+       btrfs_lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+                         (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+                         &cached_state);
        /*
         * Now we have a consistent view about the extent map, re-check
         * which range really needs to be defragged.
@@ -1251,9 +1251,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
                kfree(entry);
        }
 unlock_extent:
-       unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-                     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-                     &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+                           (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+                           &cached_state);
 free_folios:
        for (i = 0; i < nr_pages; i++) {
                folio_unlock(folios[i]);
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index a374ce7a1813b0a4e49e806beb7c95320dc29ac8..eef8f0a93e80b2ee380de3d837b3ecadcd7c2a2d 100644
@@ -50,13 +50,13 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
        while (1) {
                if (nowait) {
-                       if (!try_lock_extent(io_tree, lockstart, lockend,
-                                            cached_state)) {
+                       if (!btrfs_try_lock_extent(io_tree, lockstart, lockend,
+                                                  cached_state)) {
                                ret = -EAGAIN;
                                break;
                        }
                } else {
-                       lock_extent(io_tree, lockstart, lockend, cached_state);
+                       btrfs_lock_extent(io_tree, lockstart, lockend, cached_state);
                }
                /*
                 * We're concerned with the entire range that we're going to be
@@ -78,7 +78,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                                                         lockstart, lockend)))
                        break;
 
-               unlock_extent(io_tree, lockstart, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state);
 
                if (ordered) {
                        if (nowait) {
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index bdcb183245160861d03d0acff9c73eb1e7601fdb..86436ed37ad614a6dcc4c4b406ea7658bb2fb135 100644
@@ -145,14 +145,14 @@ int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
 bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
                       struct extent_state **cached);
 
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                             struct extent_state **cached)
+static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                                   struct extent_state **cached)
 {
        return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
 }
 
-static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
-                                  u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
+                                        u64 end, struct extent_state **cached)
 {
        return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
 }
@@ -184,8 +184,8 @@ static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
        return __clear_extent_bit(tree, start, end, bits, cached, NULL);
 }
 
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-                               struct extent_state **cached)
+static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                                     struct extent_state **cached)
 {
        return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
 }
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ba067d3461e62e05010901abd2258ad91893907a..31cf2189a2243712783c391ae2679cddf722a5e1 100644
@@ -371,13 +371,13 @@ again:
        }
 
        /* step three, lock the state bits for the whole range */
-       lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+       btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
 
        /* then test to make sure it is all still delalloc */
        ret = test_range_bit(tree, delalloc_start, delalloc_end,
                             EXTENT_DELALLOC, cached_state);
 
-       unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+       btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
        if (!ret) {
                unlock_delalloc_folio(inode, locked_folio, delalloc_start,
                                      delalloc_end);
@@ -1201,7 +1201,7 @@ static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
        ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
 
 again:
-       lock_extent(&inode->io_tree, start, end, cached_state);
+       btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
        cur_pos = start;
        while (cur_pos < end) {
                struct btrfs_ordered_extent *ordered;
@@ -1224,7 +1224,7 @@ again:
                }
 
                /* Now wait for the OE to finish. */
-               unlock_extent(&inode->io_tree, start, end, cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
                btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
                btrfs_put_ordered_extent(ordered);
                /* We have unlocked the whole range, restart from the beginning. */
@@ -1244,7 +1244,7 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
 
        lock_extents_for_read(inode, start, end, &cached_state);
        ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
        free_extent_map(em_cached);
 
@@ -1432,8 +1432,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
                         * We've hit an error during previous delalloc range,
                         * have to cleanup the remaining locked ranges.
                         */
-                       unlock_extent(&inode->io_tree, found_start,
-                                     found_start + found_len - 1, NULL);
+                       btrfs_unlock_extent(&inode->io_tree, found_start,
+                                           found_start + found_len - 1, NULL);
                        unlock_delalloc_folio(&inode->vfs_inode, folio,
                                              found_start,
                                              found_start + found_len - 1);
@@ -2563,7 +2563,7 @@ void btrfs_readahead(struct readahead_control *rac)
        while ((folio = readahead_folio(rac)) != NULL)
                btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
 
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
 
        if (em_cached)
                free_extent_map(em_cached);
@@ -2590,7 +2590,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
        if (start > end)
                return 0;
 
-       lock_extent(tree, start, end, &cached_state);
+       btrfs_lock_extent(tree, start, end, &cached_state);
        folio_wait_writeback(folio);
 
        /*
@@ -2598,7 +2598,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
         * so here we only need to unlock the extent range to free any
         * existing extent state.
         */
-       unlock_extent(tree, start, end, &cached_state);
+       btrfs_unlock_extent(tree, start, end, &cached_state);
        return 0;
 }
 
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index d62c36a0b7ba41b67975db0f0416eff175a2cbaa..67c724a576eee60a96bdc4b25dadcfc5f96d5191 100644
@@ -1055,7 +1055,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
                goto out_free_pre;
        }
 
-       lock_extent(&inode->io_tree, start, start + len - 1, NULL);
+       btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
        write_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (!em) {
@@ -1108,7 +1108,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
 
 out_unlock:
        write_unlock(&em_tree->lock);
-       unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
+       btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
        free_extent_map(split_mid);
 out_free_pre:
        free_extent_map(split_pre);
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index 7715e30508c575411a803992c1c5be562e5eb601..ba65a4821c445ca2ad330d531d887d6e7f27dd3f 100644
@@ -661,7 +661,7 @@ restart:
        range_end = round_up(start + len, sectorsize);
        prev_extent_end = range_start;
 
-       lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
 
        ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
        if (ret < 0)
@@ -841,7 +841,7 @@ check_eof_delalloc:
        }
 
 out_unlock:
-       unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
 
        if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
                btrfs_release_path(path);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index cd2e34e11afd0005fc121b29f2fa9dd3e0e24fc8..0baee2bb110b3ebe90f76d83345388e2c864633c 100644
@@ -921,14 +921,15 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
                struct btrfs_ordered_extent *ordered;
 
                if (nowait) {
-                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
-                                            cached_state)) {
+                       if (!btrfs_try_lock_extent(&inode->io_tree, start_pos,
+                                                  last_pos, cached_state)) {
                                folio_unlock(folio);
                                folio_put(folio);
                                return -EAGAIN;
                        }
                } else {
-                       lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
+                       btrfs_lock_extent(&inode->io_tree, start_pos, last_pos,
+                                         cached_state);
                }
 
                ordered = btrfs_lookup_ordered_range(inode, start_pos,
@@ -936,8 +937,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
                if (ordered &&
                    ordered->file_offset + ordered->num_bytes > start_pos &&
                    ordered->file_offset <= last_pos) {
-                       unlock_extent(&inode->io_tree, start_pos, last_pos,
-                                     cached_state);
+                       btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos,
+                                           cached_state);
                        folio_unlock(folio);
                        folio_put(folio);
                        btrfs_start_ordered_extent(ordered);
@@ -1017,7 +1018,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
        else
                *write_bytes = min_t(size_t, *write_bytes ,
                                     num_bytes - pos + lockstart);
-       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
        return ret;
 }
@@ -1284,8 +1285,8 @@ again:
                /* No copied bytes, unlock, release reserved space and exit. */
                if (copied == 0) {
                        if (extents_locked)
-                               unlock_extent(&inode->io_tree, lockstart, lockend,
-                                             &cached_state);
+                               btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
+                                                   &cached_state);
                        else
                                free_extent_state(cached_state);
                        btrfs_delalloc_release_extents(inode, reserved_len);
@@ -1314,7 +1315,7 @@ again:
         * to avoid a memory leak.
         */
        if (extents_locked)
-               unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
        else
                free_extent_state(cached_state);
 
@@ -1895,11 +1896,11 @@ again:
        }
        folio_wait_writeback(folio);
 
-       lock_extent(io_tree, page_start, page_end, &cached_state);
+       btrfs_lock_extent(io_tree, page_start, page_end, &cached_state);
        ret2 = set_folio_extent_mapped(folio);
        if (ret2 < 0) {
                ret = vmf_error(ret2);
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                goto out_unlock;
        }
 
@@ -1909,7 +1910,7 @@ again:
         */
        ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize);
        if (ordered) {
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                folio_unlock(folio);
                up_read(&BTRFS_I(inode)->i_mmap_lock);
                btrfs_start_ordered_extent(ordered);
@@ -1941,7 +1942,7 @@ again:
        ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
                                        &cached_state);
        if (ret2) {
-               unlock_extent(io_tree, page_start, page_end, &cached_state);
+               btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
@@ -1961,7 +1962,7 @@ again:
 
        btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
 
-       unlock_extent(io_tree, page_start, page_end, &cached_state);
+       btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
        up_read(&BTRFS_I(inode)->i_mmap_lock);
 
        btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
@@ -2222,8 +2223,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
        while (1) {
                truncate_pagecache_range(inode, lockstart, lockend);
 
-               lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                           cached_state);
+               btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                 cached_state);
                /*
                 * We can't have ordered extents in the range, nor dirty/writeback
                 * pages, because we have locked the inode's VFS lock in exclusive
@@ -2237,8 +2238,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
                if (!check_range_has_page(inode, lockstart, lockend))
                        break;
 
-               unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                             cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                   cached_state);
        }
 
        btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
@@ -2736,8 +2737,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
 out:
-       unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                           &cached_state);
 out_only_mutex:
        if (!updated_inode && truncated_block && !ret) {
                /*
@@ -3007,16 +3008,16 @@ reserve_space:
                ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
                                                alloc_start, bytes_to_reserve);
                if (ret) {
-                       unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
-                                     lockend, &cached_state);
+                       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+                                           lockend, &cached_state);
                        goto out;
                }
                ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
                                                alloc_end - alloc_start,
                                                fs_info->sectorsize,
                                                offset + len, &alloc_hint);
-               unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                             &cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+                                   &cached_state);
                /* btrfs_prealloc_file_range releases reserved space on error */
                if (ret) {
                        space_reserved = false;
@@ -3127,8 +3128,8 @@ static long btrfs_fallocate(struct file *file, int mode,
        }
 
        locked_end = alloc_end - 1;
-       lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                   &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                         &cached_state);
 
        btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
 
@@ -3216,8 +3217,8 @@ static long btrfs_fallocate(struct file *file, int mode,
         */
        ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
 out_unlock:
-       unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+                           &cached_state);
 out:
        btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
        extent_changeset_free(data_reserved);
@@ -3563,7 +3564,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
 
        last_extent_end = lockstart;
 
-       lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
@@ -3709,7 +3710,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
        }
 
 out:
-       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
        btrfs_free_path(path);
 
        if (ret < 0)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 633dfe4ee93e8d5c6174ec6b9a53ee0752b4b3f2..30263f15bdcadecdfad4e517e6e22b0a67a691a0 100644
@@ -336,7 +336,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(vfs_inode, 0);
 
-       lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
        btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
 
        /*
@@ -348,7 +348,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
        inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
        btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
 
-       unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
        if (ret)
                goto fail;
 
@@ -1288,8 +1288,8 @@ cleanup_write_cache_enospc(struct inode *inode,
                           struct extent_state **cached_state)
 {
        io_ctl_drop_pages(io_ctl);
-       unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                     cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                           cached_state);
 }
 
 static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1414,8 +1414,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
        if (ret)
                goto out_unlock;
 
-       lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                   &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                         &cached_state);
 
        io_ctl_set_generation(io_ctl, trans->transid);
 
@@ -1475,8 +1475,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
        io_ctl_drop_pages(io_ctl);
        io_ctl_free(io_ctl);
 
-       unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-                     &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+                           &cached_state);
 
        /*
         * at this point the pages are under IO and we're happy,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b2b454e447cacf3940b96af08834ee76e723a614..6b173fa2c325e699e8360f25fb08377e1b1acae7 100644
@@ -686,12 +686,12 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
        if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
                return 1;
 
-       lock_extent(&inode->io_tree, offset, end, &cached);
+       btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
        ret = __cow_file_range_inline(inode, size, compressed_size,
                                      compress_type, compressed_folio,
                                      update_i_size);
        if (ret > 0) {
-               unlock_extent(&inode->io_tree, offset, end, &cached);
+               btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
                return ret;
        }
 
@@ -1138,7 +1138,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
                goto done;
        }
 
-       lock_extent(io_tree, start, end, &cached);
+       btrfs_lock_extent(io_tree, start, end, &cached);
 
        /* Here we're doing allocation and writeback of the compressed pages */
        file_extent.disk_bytenr = ins.objectid;
@@ -1389,14 +1389,14 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
                 * Locked range will be released either during error clean up or
                 * after the whole range is finished.
                 */
-               lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
-                           &cached);
+               btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+                                 &cached);
 
                em = btrfs_create_io_em(inode, start, &file_extent,
                                        BTRFS_ORDERED_REGULAR);
                if (IS_ERR(em)) {
-                       unlock_extent(&inode->io_tree, start,
-                                     start + cur_alloc_size - 1, &cached);
+                       btrfs_unlock_extent(&inode->io_tree, start,
+                                           start + cur_alloc_size - 1, &cached);
                        ret = PTR_ERR(em);
                        goto out_reserve;
                }
@@ -1405,8 +1405,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
                ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
                                                     1 << BTRFS_ORDERED_REGULAR);
                if (IS_ERR(ordered)) {
-                       unlock_extent(&inode->io_tree, start,
-                                     start + cur_alloc_size - 1, &cached);
+                       btrfs_unlock_extent(&inode->io_tree, start,
+                                           start + cur_alloc_size - 1, &cached);
                        ret = PTR_ERR(ordered);
                        goto out_drop_extent_cache;
                }
@@ -1741,7 +1741,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
         * group that contains that extent to RO mode and therefore force COW
         * when starting writeback.
         */
-       lock_extent(io_tree, start, end, &cached_state);
+       btrfs_lock_extent(io_tree, start, end, &cached_state);
        count = count_range_bits(io_tree, &range_start, end, range_bytes,
                                 EXTENT_NORESERVE, 0, NULL);
        if (count > 0 || is_space_ino || is_reloc_ino) {
@@ -1759,7 +1759,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
                if (count > 0)
                        clear_extent_bits(io_tree, start, end, EXTENT_NORESERVE);
        }
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 
        /*
         * Don't try to create inline extents, as a mix of inline extent that
@@ -1967,7 +1967,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
        u64 end = file_pos + len - 1;
        int ret = 0;
 
-       lock_extent(&inode->io_tree, file_pos, end, cached);
+       btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
 
        if (is_prealloc) {
                struct extent_map *em;
@@ -1975,7 +1975,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
                em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
                                        BTRFS_ORDERED_PREALLOC);
                if (IS_ERR(em)) {
-                       unlock_extent(&inode->io_tree, file_pos, end, cached);
+                       btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
                        return PTR_ERR(em);
                }
                free_extent_map(em);
@@ -1988,7 +1988,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
        if (IS_ERR(ordered)) {
                if (is_prealloc)
                        btrfs_drop_extent_map_range(inode, file_pos, end, false);
-               unlock_extent(&inode->io_tree, file_pos, end, cached);
+               btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
                return PTR_ERR(ordered);
        }
 
@@ -2287,7 +2287,7 @@ error:
        if (cur_offset < end) {
                struct extent_state *cached = NULL;
 
-               lock_extent(&inode->io_tree, cur_offset, end, &cached);
+               btrfs_lock_extent(&inode->io_tree, cur_offset, end, &cached);
                extent_clear_unlock_delalloc(inode, cur_offset, end,
                                             locked_folio, &cached,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
@@ -2793,7 +2793,7 @@ again:
        if (ret)
                goto out_page;
 
-       lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 
        /* already ordered? We're done */
        if (folio_test_ordered(folio))
@@ -2801,8 +2801,8 @@ again:
 
        ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
        if (ordered) {
-               unlock_extent(&inode->io_tree, page_start, page_end,
-                             &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+                                   &cached_state);
                folio_unlock(folio);
                btrfs_start_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
@@ -2828,7 +2828,7 @@ out_reserved:
        if (free_delalloc_space)
                btrfs_delalloc_release_space(inode, data_reserved, page_start,
                                             PAGE_SIZE, true);
-       unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 out_page:
        if (ret) {
                /*
@@ -4860,11 +4860,11 @@ again:
 
        folio_wait_writeback(folio);
 
-       lock_extent(io_tree, block_start, block_end, &cached_state);
+       btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
 
        ordered = btrfs_lookup_ordered_extent(inode, block_start);
        if (ordered) {
-               unlock_extent(io_tree, block_start, block_end, &cached_state);
+               btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
                folio_unlock(folio);
                folio_put(folio);
                btrfs_start_ordered_extent(ordered);
@@ -4879,7 +4879,7 @@ again:
        ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
                                        &cached_state);
        if (ret) {
-               unlock_extent(io_tree, block_start, block_end, &cached_state);
+               btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
                goto out_unlock;
        }
 
@@ -4898,7 +4898,7 @@ again:
                                  block_end + 1 - block_start);
        btrfs_folio_set_dirty(fs_info, folio, block_start,
                              block_end + 1 - block_start);
-       unlock_extent(io_tree, block_start, block_end, &cached_state);
+       btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
 
        if (only_release_metadata)
                set_extent_bit(&inode->io_tree, block_start, block_end,
@@ -5060,7 +5060,7 @@ next:
                        break;
        }
        free_extent_map(em);
-       unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
+       btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
        return ret;
 }
 
@@ -5244,7 +5244,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
                state_flags = state->state;
                spin_unlock(&io_tree->lock);
 
-               lock_extent(io_tree, start, end, &cached_state);
+               btrfs_lock_extent(io_tree, start, end, &cached_state);
 
                /*
                 * If still has DELALLOC flag, the extent didn't reach disk,
@@ -7360,7 +7360,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
        }
 
        if (!inode_evicting)
-               lock_extent(tree, page_start, page_end, &cached_state);
+               btrfs_lock_extent(tree, page_start, page_end, &cached_state);
 
        cur = page_start;
        while (cur < page_end) {
@@ -7568,7 +7568,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
                const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
 
                control.new_size = new_size;
-               lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
                /*
                 * We want to drop from the next block forward in case this new
                 * size is not block aligned since we will be keeping the last
@@ -7583,7 +7583,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
                inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
                btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
 
-               unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
 
                trans->block_rsv = &fs_info->trans_block_rsv;
                if (ret != -ENOSPC && ret != -EAGAIN)
@@ -9138,7 +9138,7 @@ static ssize_t btrfs_encoded_read_inline(
 
        read_extent_buffer(leaf, tmp, ptr, count);
        btrfs_release_path(path);
-       unlock_extent(io_tree, start, lockend, cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        *unlocked = true;
 
@@ -9286,7 +9286,7 @@ ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
        if (ret)
                goto out;
 
-       unlock_extent(io_tree, start, lockend, cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        *unlocked = true;
 
@@ -9363,7 +9363,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
                        goto out_unlock_inode;
                }
 
-               if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
+               if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
                        ret = -EAGAIN;
                        goto out_unlock_inode;
                }
@@ -9372,7 +9372,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
                                                     lockend - start + 1);
                if (ordered) {
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                        ret = -EAGAIN;
                        goto out_unlock_inode;
                }
@@ -9385,13 +9385,13 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
                        if (ret)
                                goto out_unlock_inode;
 
-                       lock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_lock_extent(io_tree, start, lockend, cached_state);
                        ordered = btrfs_lookup_ordered_range(inode, start,
                                                             lockend - start + 1);
                        if (!ordered)
                                break;
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(io_tree, start, lockend, cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                        cond_resched();
                }
        }
@@ -9466,7 +9466,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
        em = NULL;
 
        if (*disk_bytenr == EXTENT_MAP_HOLE) {
-               unlock_extent(io_tree, start, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, start, lockend, cached_state);
                btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                unlocked = true;
                ret = iov_iter_zero(count, iter);
@@ -9482,7 +9482,7 @@ out_em:
 out_unlock_extent:
        /* Leave inode and extent locked if we need to do a read. */
        if (!unlocked && ret != -EIOCBQUEUED)
-               unlock_extent(io_tree, start, lockend, cached_state);
+               btrfs_unlock_extent(io_tree, start, lockend, cached_state);
 out_unlock_inode:
        if (!unlocked && ret != -EIOCBQUEUED)
                btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -9629,14 +9629,14 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
                                                    end >> PAGE_SHIFT);
                if (ret)
                        goto out_folios;
-               lock_extent(io_tree, start, end, &cached_state);
+               btrfs_lock_extent(io_tree, start, end, &cached_state);
                ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
                if (!ordered &&
                    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
                        break;
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-               unlock_extent(io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(io_tree, start, end, &cached_state);
                cond_resched();
        }
 
@@ -9701,7 +9701,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
        if (start + encoded->len > inode->vfs_inode.i_size)
                i_size_write(&inode->vfs_inode, start + encoded->len);
 
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 
        btrfs_delalloc_release_extents(inode, num_bytes);
 
@@ -9726,7 +9726,7 @@ out_free_data_space:
        if (!extent_reserved)
                btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
 out_unlock:
-       unlock_extent(io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(io_tree, start, end, &cached_state);
 out_folios:
        for (i = 0; i < nr_folios; i++) {
                if (folios[i])
@@ -9991,7 +9991,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 
        isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
-       lock_extent(io_tree, 0, isize - 1, &cached_state);
+       btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
        while (prev_extent_end < isize) {
                struct btrfs_key key;
                struct extent_buffer *leaf;
@@ -10169,7 +10169,7 @@ out:
        if (!IS_ERR_OR_NULL(map))
                btrfs_free_chunk_map(map);
 
-       unlock_extent(io_tree, 0, isize - 1, &cached_state);
+       btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
 
        if (ret)
                btrfs_swap_deactivate(file);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 63aeacc549457487674c014dda1085bfd161ef5b..3341344a9fd27acf955f3a88faca8c6ccc7edf6c 100644
@@ -4510,7 +4510,7 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
                                                 args.compression, &unlocked);
 
                if (!unlocked) {
-                       unlock_extent(io_tree, start, lockend, &cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
                        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                }
        }
@@ -4699,7 +4699,7 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss
        ret = priv->count;
 
 out:
-       unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+       btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 
        io_uring_cmd_done(cmd, ret, 0, issue_flags);
@@ -4788,7 +4788,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
        return -EIOCBQUEUED;
 
 out_fail:
-       unlock_extent(io_tree, start, lockend, &cached_state);
+       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
        kfree(priv);
        return ret;
@@ -4913,7 +4913,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
                         (const char *)&data->args + copy_end_kernel,
                         sizeof(data->args) - copy_end_kernel)) {
                if (ret == -EIOCBQUEUED) {
-                       unlock_extent(io_tree, start, lockend, &cached_state);
+                       btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
                        btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
                }
                ret = -EFAULT;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 03c945711003c0ac4c896452d6d1973bedaa7098..b5b544712e93a36cd3f39bcdfad6a7aab5bce588 100644
@@ -1173,7 +1173,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                cachedp = cached_state;
 
        while (1) {
-               lock_extent(&inode->io_tree, start, end, cachedp);
+               btrfs_lock_extent(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
@@ -1186,7 +1186,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                refcount_dec(&cache->refs);
                        break;
                }
-               unlock_extent(&inode->io_tree, start, end, cachedp);
+               btrfs_unlock_extent(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
@@ -1204,7 +1204,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
 {
        struct btrfs_ordered_extent *ordered;
 
-       if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
+       if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state))
                return false;
 
        ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
@@ -1212,7 +1212,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
                return true;
 
        btrfs_put_ordered_extent(ordered);
-       unlock_extent(&inode->io_tree, start, end, cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
 
        return false;
 }
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 09e40a2fdcf00455b7871083133e6b928d60ba5a..e6c0a10c4c67bc081d7f29b1b0ff9343768cd3d5 100644
@@ -645,10 +645,10 @@ static int btrfs_extent_same_range(struct btrfs_inode *src, u64 loff, u64 len,
         * because we have already locked the inode's i_mmap_lock in exclusive
         * mode.
         */
-       lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+       btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
        ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
                          ALIGN(len, bs), dst_loff, 1);
-       unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+       btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
 
        btrfs_btree_balance_dirty(fs_info);
 
@@ -748,9 +748,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
         * mode.
         */
        end = destoff + len - 1;
-       lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+       btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
        ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
-       unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+       btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
 
        /*
         * We may have copied an inline extent into a page of the destination
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b7f7563d3c141a5de6c437767e668a9b231ea76e..4ac2e63cbe55706c212fbb7e1f5b2ef5105a34e7 100644
@@ -910,16 +910,16 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                                /* Take mmap lock to serialize with reflinks. */
                                if (!down_read_trylock(&inode->i_mmap_lock))
                                        continue;
-                               ret = try_lock_extent(&inode->io_tree, key.offset,
-                                                     end, &cached_state);
+                               ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
+                                                           end, &cached_state);
                                if (!ret) {
                                        up_read(&inode->i_mmap_lock);
                                        continue;
                                }
 
                                btrfs_drop_extent_map_range(inode, key.offset, end, true);
-                               unlock_extent(&inode->io_tree, key.offset, end,
-                                             &cached_state);
+                               btrfs_unlock_extent(&inode->io_tree, key.offset, end,
+                                                   &cached_state);
                                up_read(&inode->i_mmap_lock);
                        }
                }
@@ -1378,9 +1378,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
                }
 
                /* the lock_extent waits for read_folio to complete */
-               lock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
                btrfs_drop_extent_map_range(inode, start, end, true);
-               unlock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
        }
        return 0;
 }
@@ -2735,13 +2735,13 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control
                else
                        end = cluster->end - offset;
 
-               lock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
                num_bytes = end + 1 - start;
                ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
                                                num_bytes, num_bytes,
                                                end + 1, &alloc_hint);
                cur_offset = end + 1;
-               unlock_extent(&inode->io_tree, start, end, &cached_state);
+               btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
                if (ret)
                        break;
        }
@@ -2774,9 +2774,9 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr
        em->ram_bytes = em->len;
        em->flags |= EXTENT_FLAG_PINNED;
 
-       lock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
        ret = btrfs_replace_extent_map_range(inode, em, false);
-       unlock_extent(&inode->io_tree, start, end, &cached_state);
+       btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
        free_extent_map(em);
 
        return ret;
@@ -2899,8 +2899,8 @@ again:
                        goto release_folio;
 
                /* Mark the range delalloc and dirty for later writeback */
-               lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-                           &cached_state);
+               btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+                                 clamped_end, &cached_state);
                ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
                                                clamped_end, 0, &cached_state);
                if (ret) {
@@ -2933,8 +2933,8 @@ again:
                                       boundary_start, boundary_end,
                                       EXTENT_BOUNDARY, NULL);
                }
-               unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-                             &cached_state);
+               btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+                                   &cached_state);
                btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
                cur += clamped_len;
 
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 8773758a8cc7d2196822907f08309e7383d9b636..98c6e237d648026e99df28d1d8145f578d68e534 100644
@@ -190,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
                        sectorsize - 1, start, end);
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
        unlock_page(locked_page);
        put_page(locked_page);
 
@@ -226,7 +226,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
                test_err("there were unlocked pages in the range");
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
        /* locked_page was unlocked above */
        put_page(locked_page);
 
@@ -281,7 +281,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
                test_err("pages in range were not all locked");
                goto out_bits;
        }
-       unlock_extent(tmp, start, end, NULL);
+       btrfs_unlock_extent(tmp, start, end, NULL);
 
        /*
         * Now to test where we run into a page that is no longer dirty in the
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f5af11565b87603bc078ab891318959a6dbccdea..31087b69d68f391ae58029dbae2b60f211e153e2 100644
@@ -4300,8 +4300,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
         * file which happens to refer to the same extent as well. Such races
         * can leave checksum items in the log with overlapping ranges.
         */
-       ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
-                         &cached_state);
+       ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+                               &cached_state);
        if (ret)
                return ret;
        /*
@@ -4317,8 +4317,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
        if (!ret)
                ret = btrfs_csum_file_blocks(trans, log_root, sums);
 
-       unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
-                     &cached_state);
+       btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+                           &cached_state);
 
        return ret;
 }