btrfs: use refcount_t type for the extent buffer reference counter
author Filipe Manana <fdmanana@suse.com>
Mon, 2 Jun 2025 12:56:48 +0000 (13:56 +0100)
committer David Sterba <dsterba@suse.com>
Mon, 21 Jul 2025 21:53:30 +0000 (23:53 +0200)
Instead of using a bare atomic, use the refcount_t type which, despite
being a structure that contains only an atomic, has an API that checks
for underflows and other hazards. This doesn't change the size of the
extent_buffer structure.

This removes the need to do things like this:

    WARN_ON(atomic_read(&eb->refs) == 0);
    if (atomic_dec_and_test(&eb->refs)) {
        (...)
    }

And do just:

    if (refcount_dec_and_test(&eb->refs)) {
        (...)
    }

This works because refcount_dec_and_test() already triggers a warning when
we decrement a ref count that has a value of 0 (or below zero).
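
For illustration only (not part of this patch), here is a minimal
userspace sketch of the kind of underflow check refcount_dec_and_test()
performs. The names below (toy_refcount, toy_refcount_dec_and_test) are
made up for this example; the real implementation lives in
include/linux/refcount.h and additionally saturates the counter and
WARNs instead of printing:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy counterpart of refcount_t: a struct wrapping a single atomic. */
    struct toy_refcount { atomic_int refs; };

    /*
     * Toy counterpart of refcount_dec_and_test(): return true when the
     * last reference is dropped, and complain on underflow instead of
     * silently going negative.
     */
    static bool toy_refcount_dec_and_test(struct toy_refcount *r)
    {
            int old = atomic_fetch_sub(&r->refs, 1);

            if (old == 1)
                    return true;            /* dropped the last reference */
            if (old <= 0)
                    fprintf(stderr, "toy refcount: underflow detected\n");
            return false;
    }

    int main(void)
    {
            struct toy_refcount r = { .refs = 1 };

            if (toy_refcount_dec_and_test(&r))
                    printf("last ref gone, object may be freed\n");

            /* Decrementing again now trips the underflow check. */
            toy_refcount_dec_and_test(&r);
            return 0;
    }

With a bare atomic_t, the second decrement above would simply wrap to -1
with no diagnostic, which is the class of bug the refcount_t API is meant
to catch.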

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/fiemap.c
fs/btrfs/print-tree.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/tree-log.c
fs/btrfs/zoned.c
include/trace/events/btrfs.h

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 94c4ed1b99d0f3a3aeac9bfa60bea0877f63fe28..1b36ee2d804484b5c6432579a490097bd74e427e 100644
@@ -198,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
                 * the inc_not_zero dance and if it doesn't work then
                 * synchronize_rcu and try again.
                 */
-               if (atomic_inc_not_zero(&eb->refs)) {
+               if (refcount_inc_not_zero(&eb->refs)) {
                        rcu_read_unlock();
                        break;
                }
@@ -560,7 +560,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
-               atomic_inc(&cow->refs);
+               refcount_inc(&cow->refs);
                rcu_assign_pointer(root->node, cow);
 
                ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
@@ -1092,7 +1092,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
        /* update the path */
        if (left) {
                if (btrfs_header_nritems(left) > orig_slot) {
-                       atomic_inc(&left->refs);
+                       refcount_inc(&left->refs);
                        /* left was locked after cow */
                        path->nodes[level] = left;
                        path->slots[level + 1] -= 1;
@@ -1696,7 +1696,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
 
        if (p->search_commit_root) {
                b = root->commit_root;
-               atomic_inc(&b->refs);
+               refcount_inc(&b->refs);
                level = btrfs_header_level(b);
                /*
                 * Ensure that all callers have set skip_locking when
@@ -2894,7 +2894,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        free_extent_buffer(old);
 
        add_root_to_dirty_list(root);
-       atomic_inc(&c->refs);
+       refcount_inc(&c->refs);
        path->nodes[level] = c;
        path->locks[level] = BTRFS_WRITE_LOCK;
        path->slots[level] = 0;
@@ -4451,7 +4451,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 
        root_sub_used_bytes(root);
 
-       atomic_inc(&leaf->refs);
+       refcount_inc(&leaf->refs);
        ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
        free_extent_buffer_stale(leaf);
        if (ret < 0)
@@ -4536,7 +4536,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                         * for possible call to btrfs_del_ptr below
                         */
                        slot = path->slots[1];
-                       atomic_inc(&leaf->refs);
+                       refcount_inc(&leaf->refs);
                        /*
                         * We want to be able to at least push one item to the
                         * left neighbour leaf, and that's the first item.
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2c122f9f82800c0b4fee5d0b61af1558a483e764..46d4963a82413575ce933fdd498b2475e91cb1a2 100644
@@ -6348,7 +6348,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 
        btrfs_assert_tree_write_locked(parent);
        parent_level = btrfs_header_level(parent);
-       atomic_inc(&parent->refs);
+       refcount_inc(&parent->refs);
        path->nodes[parent_level] = parent;
        path->slots[parent_level] = btrfs_header_nritems(parent);
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 5e8dd9a99e58c1ea33d9107a72761446445c3eec..ac639d50706ab74df82af70c6e2558ff6e14f637 100644
@@ -77,7 +77,7 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
                                      struct extent_buffer, leak_list);
                pr_err(
        "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
-                      eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
+                      eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
                       btrfs_header_owner(eb));
                list_del(&eb->leak_list);
                WARN_ON_ONCE(1);
@@ -1961,7 +1961,7 @@ retry:
        if (!eb)
                return NULL;
 
-       if (!atomic_inc_not_zero(&eb->refs)) {
+       if (!refcount_inc_not_zero(&eb->refs)) {
                xas_reset(xas);
                goto retry;
        }
@@ -2012,7 +2012,7 @@ static struct extent_buffer *find_extent_buffer_nolock(
 
        rcu_read_lock();
        eb = xa_load(&fs_info->buffer_tree, index);
-       if (eb && !atomic_inc_not_zero(&eb->refs))
+       if (eb && !refcount_inc_not_zero(&eb->refs))
                eb = NULL;
        rcu_read_unlock();
        return eb;
@@ -2842,7 +2842,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info
        btrfs_leak_debug_add_eb(eb);
 
        spin_lock_init(&eb->refs_lock);
-       atomic_set(&eb->refs, 1);
+       refcount_set(&eb->refs, 1);
 
        ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
@@ -2975,13 +2975,13 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
         * once io is initiated, TREE_REF can no longer be cleared, so that is
         * the moment at which any such race is best fixed.
         */
-       refs = atomic_read(&eb->refs);
+       refs = refcount_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
                return;
 
        spin_lock(&eb->refs_lock);
        if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_inc(&eb->refs);
+               refcount_inc(&eb->refs);
        spin_unlock(&eb->refs_lock);
 }
 
@@ -3047,7 +3047,7 @@ again:
                return ERR_PTR(ret);
        }
        if (exists) {
-               if (!atomic_inc_not_zero(&exists->refs)) {
+               if (!refcount_inc_not_zero(&exists->refs)) {
                        /* The extent buffer is being freed, retry. */
                        xa_unlock_irq(&fs_info->buffer_tree);
                        goto again;
@@ -3092,7 +3092,7 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
         * just overwrite folio private.
         */
        exists = folio_get_private(folio);
-       if (atomic_inc_not_zero(&exists->refs))
+       if (refcount_inc_not_zero(&exists->refs))
                return exists;
 
        WARN_ON(folio_test_dirty(folio));
@@ -3362,7 +3362,7 @@ again:
                goto out;
        }
        if (existing_eb) {
-               if (!atomic_inc_not_zero(&existing_eb->refs)) {
+               if (!refcount_inc_not_zero(&existing_eb->refs)) {
                        xa_unlock_irq(&fs_info->buffer_tree);
                        goto again;
                }
@@ -3391,7 +3391,7 @@ again:
        return eb;
 
 out:
-       WARN_ON(!atomic_dec_and_test(&eb->refs));
+       WARN_ON(!refcount_dec_and_test(&eb->refs));
 
        /*
         * Any attached folios need to be detached before we unlock them.  This
@@ -3437,8 +3437,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 {
        lockdep_assert_held(&eb->refs_lock);
 
-       WARN_ON(atomic_read(&eb->refs) == 0);
-       if (atomic_dec_and_test(&eb->refs)) {
+       if (refcount_dec_and_test(&eb->refs)) {
                struct btrfs_fs_info *fs_info = eb->fs_info;
 
                spin_unlock(&eb->refs_lock);
@@ -3484,7 +3483,7 @@ void free_extent_buffer(struct extent_buffer *eb)
        if (!eb)
                return;
 
-       refs = atomic_read(&eb->refs);
+       refs = refcount_read(&eb->refs);
        while (1) {
                if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
                        if (refs == 1)
@@ -3494,16 +3493,16 @@ void free_extent_buffer(struct extent_buffer *eb)
                }
 
                /* Optimization to avoid locking eb->refs_lock. */
-               if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
+               if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
                        return;
        }
 
        spin_lock(&eb->refs_lock);
-       if (atomic_read(&eb->refs) == 2 &&
+       if (refcount_read(&eb->refs) == 2 &&
            test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
            !extent_buffer_under_io(eb) &&
            test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_dec(&eb->refs);
+               refcount_dec(&eb->refs);
 
        /*
         * I know this is terrible, but it's temporary until we stop tracking
@@ -3520,9 +3519,9 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
        spin_lock(&eb->refs_lock);
        set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
 
-       if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+       if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
            test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_dec(&eb->refs);
+               refcount_dec(&eb->refs);
        release_extent_buffer(eb);
 }
 
@@ -3580,7 +3579,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
                        btree_clear_folio_dirty_tag(folio);
                folio_unlock(folio);
        }
-       WARN_ON(atomic_read(&eb->refs) == 0);
+       WARN_ON(refcount_read(&eb->refs) == 0);
 }
 
 void set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -3591,7 +3590,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
 
        was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 
-       WARN_ON(atomic_read(&eb->refs) == 0);
+       WARN_ON(refcount_read(&eb->refs) == 0);
        WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
        WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
 
@@ -3717,7 +3716,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 
        eb->read_mirror = 0;
        check_buffer_tree_ref(eb);
-       atomic_inc(&eb->refs);
+       refcount_inc(&eb->refs);
 
        bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
                               REQ_OP_READ | REQ_META, eb->fs_info,
@@ -4312,7 +4311,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
                 * won't disappear out from under us.
                 */
                spin_lock(&eb->refs_lock);
-               if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+               if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
                        spin_unlock(&eb->refs_lock);
                        continue;
                }
@@ -4378,7 +4377,7 @@ int try_release_extent_buffer(struct folio *folio)
         * this page.
         */
        spin_lock(&eb->refs_lock);
-       if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+       if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
                spin_unlock(&eb->refs_lock);
                spin_unlock(&folio->mapping->i_private_lock);
                return 0;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index e36e8d6a00bc50538f63ade5fe96a65990223483..65bb87f1dce61e859cdb8248f6bf28c71673501c 100644
@@ -98,7 +98,7 @@ struct extent_buffer {
        void *addr;
 
        spinlock_t refs_lock;
-       atomic_t refs;
+       refcount_t refs;
        int read_mirror;
        /* >= 0 if eb belongs to a log tree, -1 otherwise */
        s8 log_index;
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index 43bf0979fd53943b5bf563a329cf7a0dd965e976..7935586a9dbd0ffebb3c8052d987a6330dfa54cd 100644
@@ -320,7 +320,7 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
         * the cost of allocating a new one.
         */
        ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
-       atomic_inc(&clone->refs);
+       refcount_inc(&clone->refs);
 
        ret = btrfs_next_leaf(inode->root, path);
        if (ret != 0)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index fc821aa446f02f31a099635307959ecff22f72c5..21605b03f511889591ae88fb9d13724427678ba6 100644
@@ -223,7 +223,7 @@ static void print_eb_refs_lock(const struct extent_buffer *eb)
 {
 #ifdef CONFIG_BTRFS_DEBUG
        btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
-                  atomic_read(&eb->refs), eb->lock_owner, current->pid);
+                  refcount_read(&eb->refs), eb->lock_owner, current->pid);
 #endif
 }
 
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 90685812ee56de11a1e7bf2e80e3a5f058a04da5..a1afc549c404bcf5e07414506cbf296cafe33548 100644
@@ -2338,7 +2338,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
                btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
 
        /* For src_path */
-       atomic_inc(&src_eb->refs);
+       refcount_inc(&src_eb->refs);
        src_path->nodes[root_level] = src_eb;
        src_path->slots[root_level] = dst_path->slots[root_level];
        src_path->locks[root_level] = 0;
@@ -2571,7 +2571,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
                goto out;
        }
        /* For dst_path */
-       atomic_inc(&dst_eb->refs);
+       refcount_inc(&dst_eb->refs);
        dst_path->nodes[level] = dst_eb;
        dst_path->slots[level] = 0;
        dst_path->locks[level] = 0;
@@ -2663,7 +2663,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
         * walk back up the tree (adjusting slot pointers as we go)
         * and restart the search process.
         */
-       atomic_inc(&root_eb->refs);     /* For path */
+       refcount_inc(&root_eb->refs);   /* For path */
        path->nodes[root_level] = root_eb;
        path->slots[root_level] = 0;
        path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0b73f58db33f033e1fa7b6d4910a9818498563a4..d7ec1d72821c266e65c79642d47905df423ed960 100644
@@ -1524,7 +1524,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 
        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
                level = btrfs_root_level(root_item);
-               atomic_inc(&reloc_root->node->refs);
+               refcount_inc(&reloc_root->node->refs);
                path->nodes[level] = reloc_root->node;
                path->slots[level] = 0;
        } else {
@@ -4347,7 +4347,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                }
 
                btrfs_backref_drop_node_buffer(node);
-               atomic_inc(&cow->refs);
+               refcount_inc(&cow->refs);
                node->eb = cow;
                node->new_bytenr = cow->start;
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index cd37c385541c5877db667978dd2d75b6e247a9e4..a7590c8c9996e8001faf4b1a2c35aaa49abe5856 100644
@@ -2719,7 +2719,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
        level = btrfs_header_level(log->node);
        orig_level = level;
        path->nodes[level] = log->node;
-       atomic_inc(&log->node->refs);
+       refcount_inc(&log->node->refs);
        path->slots[level] = 0;
 
        while (1) {
@@ -3683,7 +3683,7 @@ static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx)
         * Add extra ref to scratch eb so that it is not freed when callers
         * release the path, so we can reuse it later if needed.
         */
-       atomic_inc(&ctx->scratch_eb->refs);
+       refcount_inc(&ctx->scratch_eb->refs);
 
        return 0;
 }
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 3fa526a0e37bb82abda51df5633b9dc14f6fdaa3..805f2eca20e935640242cf0101025edb36015de4 100644
@@ -2485,7 +2485,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 
        /* For the work */
        btrfs_get_block_group(bg);
-       atomic_inc(&eb->refs);
+       refcount_inc(&eb->refs);
        bg->last_eb = eb;
        INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
        queue_work(system_unbound_wq, &bg->zone_finish_work);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index bebc252db8654f9f1fe783f83ceac7d03434866b..a32305044371d900709910f344f17c2b00d2fd40 100644
@@ -1095,7 +1095,7 @@ TRACE_EVENT(btrfs_cow_block,
        TP_fast_assign_btrfs(root->fs_info,
                __entry->root_objectid  = btrfs_root_id(root);
                __entry->buf_start      = buf->start;
-               __entry->refs           = atomic_read(&buf->refs);
+               __entry->refs           = refcount_read(&buf->refs);
                __entry->cow_start      = cow->start;
                __entry->buf_level      = btrfs_header_level(buf);
                __entry->cow_level      = btrfs_header_level(cow);