btrfs: harden block_group::bg_list against list_del() races
authorBoris Burkov <boris@bur.io>
Wed, 5 Mar 2025 23:16:57 +0000 (15:16 -0800)
committerDavid Sterba <dsterba@suse.com>
Tue, 18 Mar 2025 19:35:51 +0000 (20:35 +0100)
As far as I can tell, these calls of list_del_init() on bg_list cannot
run concurrently with btrfs_mark_bg_unused() or btrfs_mark_bg_to_reclaim(),
as they are in transaction error paths and situations where the block
group is readonly.

However, if there is any chance at all of racing with btrfs_mark_bg_unused(),
or a different future user of bg_list, better to be safe than sorry.

Otherwise we risk the following interleaving (bg_list refcount in parens)

T1 (some random op)                       T2 (btrfs_mark_bg_unused)
                                        !list_empty(&bg->bg_list); (1)
list_del_init(&bg->bg_list); (1)
                                        list_move_tail (1)
btrfs_put_block_group (0)
                                        btrfs_delete_unused_bgs
                                             bg = list_first_entry
                                             list_del_init(&bg->bg_list);
                                             btrfs_put_block_group(bg); (-1)

Ultimately, this results in a broken refcount that hits zero one deref too
early, and the real final deref underflows the refcount, triggering a WARNING.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Boris Burkov <boris@bur.io>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-tree.c
fs/btrfs/transaction.c

index 5de1a1293c9386c741d5d95c0a2eddea410e1c7d..957230abd8271c0d7b580dceb7b40f0709d981b7 100644 (file)
@@ -2868,7 +2868,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
                                                   block_group->length,
                                                   &trimmed);
 
+               /*
+                * Not strictly necessary to lock, as the block_group should be
+                * read-only from btrfs_delete_unused_bgs().
+                */
+               ASSERT(block_group->ro);
+               spin_lock(&fs_info->unused_bgs_lock);
                list_del_init(&block_group->bg_list);
+               spin_unlock(&fs_info->unused_bgs_lock);
+
                btrfs_unfreeze_block_group(block_group);
                btrfs_put_block_group(block_group);
 
index db8fe291d01059b105c7afac037f15eeef17f532..470dfc3a1a5cbebc51db27474cf4dcb3a9945b98 100644 (file)
@@ -160,7 +160,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group,
                                                 bg_list);
+                       /*
+                        * Not strictly necessary to lock, as no other task will be using a
+                        * block_group on the deleted_bgs list during a transaction abort.
+                        */
+                       spin_lock(&transaction->fs_info->unused_bgs_lock);
                        list_del_init(&cache->bg_list);
+                       spin_unlock(&transaction->fs_info->unused_bgs_lock);
                        btrfs_unfreeze_block_group(cache);
                        btrfs_put_block_group(cache);
                }
@@ -2096,7 +2102,13 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
 
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+               /*
+                * Not strictly necessary to lock, as no other task will be using a
+                * block_group on the new_bgs list during a transaction abort.
+                */
+               spin_lock(&fs_info->unused_bgs_lock);
                list_del_init(&block_group->bg_list);
+               spin_unlock(&fs_info->unused_bgs_lock);
        }
 }