From 80a160e49414972b712f30b9b287d88197fe3077 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 22 May 2025 15:33:14 -0400
Subject: [PATCH] bcachefs: Plumb btree_trans for more locking asserts

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_iter.c      | 12 ++++++------
 fs/bcachefs/btree_iter.h      |  3 ++-
 fs/bcachefs/btree_key_cache.c |  2 +-
 fs/bcachefs/btree_locking.c   | 12 ++++++------
 fs/bcachefs/btree_locking.h   | 11 ++++++-----
 5 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index cae0fa60434b..51ff452562af 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -228,7 +228,7 @@ static void __bch2_btree_path_verify(struct btree_trans *trans,
 		__bch2_btree_path_verify_level(trans, path, i);
 	}
 
-	bch2_btree_path_verify_locks(path);
+	bch2_btree_path_verify_locks(trans, path);
 }
 
 void __bch2_trans_verify_paths(struct btree_trans *trans)
@@ -991,7 +991,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
 	path->level = level;
 	bch2_btree_path_level_init(trans, path, b);
 
-	bch2_btree_path_verify_locks(path);
+	bch2_btree_path_verify_locks(trans, path);
 err:
 	bch2_bkey_buf_exit(&tmp, c);
 	return ret;
@@ -1103,7 +1103,7 @@ static void btree_path_set_level_down(struct btree_trans *trans,
 		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
 			btree_node_unlock(trans, path, l);
 
-	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 	bch2_btree_path_verify(trans, path);
 }
 
@@ -1301,7 +1301,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	if (unlikely(path->cached)) {
 		btree_node_unlock(trans, path, 0);
 		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
-		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 		goto out;
 	}
 
@@ -1330,7 +1330,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	}
 
 	if (unlikely(level != path->level)) {
-		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 		__bch2_btree_path_unlock(trans, path);
 	}
 out:
@@ -1984,7 +1984,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_
 		__bch2_btree_path_unlock(trans, path);
 		path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
 		path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
-		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+		btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 		goto err;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index cafd35a5e7a3..7cb2c38b70c0 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -46,7 +46,8 @@ static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path
 	return --path->ref == 0;
 }
 
-static inline void btree_path_set_dirty(struct btree_path *path,
+static inline void btree_path_set_dirty(struct btree_trans *trans,
+					struct btree_path *path,
 					enum btree_path_uptodate u)
 {
 	path->uptodate = max_t(unsigned, path->uptodate, u);
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 9948d0e4d442..9da950e7eb7d 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -656,7 +656,7 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans,
 			path2->should_be_locked = false;
 			__bch2_btree_path_unlock(trans, path2);
 			path2->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_drop);
-			btree_path_set_dirty(path2, BTREE_ITER_NEED_TRAVERSE);
+			btree_path_set_dirty(trans, path2, BTREE_ITER_NEED_TRAVERSE);
 		}
 
 	bch2_trans_verify_locks(trans);
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 826930b4b164..2cdc9a04f3e8 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -494,7 +494,7 @@ err:
 	}
 
 	__bch2_btree_path_unlock(trans, path);
-	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 
 	/*
 	 * When we fail to get a lock, we have to ensure that any child nodes
@@ -594,7 +594,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
 	     l++) {
 		if (!bch2_btree_node_relock(trans, path, l)) {
 			__bch2_btree_path_unlock(trans, path);
-			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+			btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 		}
@@ -636,7 +636,7 @@ bool __bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
 	bool ret = !btree_path_get_locks(trans, path, true, NULL, 0) ||
 		!path->should_be_locked;
 
-	bch2_btree_path_verify_locks(path);
+	bch2_btree_path_verify_locks(trans, path);
 	return ret;
 }
 
@@ -739,7 +739,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 		}
 	}
 
-	bch2_btree_path_verify_locks(path);
+	bch2_btree_path_verify_locks(trans, path);
 
 	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
 }
@@ -880,7 +880,7 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans,
 
 /* Debug */
 
-void __bch2_btree_path_verify_locks(struct btree_path *path)
+void __bch2_btree_path_verify_locks(struct btree_trans *trans, struct btree_path *path)
 {
 	/*
 	 * A path may be uptodate and yet have nothing locked if and only if
@@ -929,5 +929,5 @@ void __bch2_trans_verify_locks(struct btree_trans *trans)
 	unsigned i;
 
 	trans_for_each_path(trans, path, i)
-		__bch2_btree_path_verify_locks(path);
+		__bch2_btree_path_verify_locks(trans, path);
 }
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 63d7e5fb77c8..9adca77e2580 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -160,7 +160,7 @@ static inline int btree_path_highest_level_locked(struct btree_path *path)
 static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
 					    struct btree_path *path)
 {
-	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
+	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_RELOCK);
 
 	while (path->nodes_locked)
 		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
@@ -433,7 +433,7 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
 					   struct btree_path *path)
 {
 	__btree_path_set_level_up(trans, path, path->level++);
-	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
 }
 
 /* debug */
@@ -445,13 +445,14 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
 
 int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);
 
-void __bch2_btree_path_verify_locks(struct btree_path *);
+void __bch2_btree_path_verify_locks(struct btree_trans *, struct btree_path *);
 void __bch2_trans_verify_locks(struct btree_trans *);
 
-static inline void bch2_btree_path_verify_locks(struct btree_path *path)
+static inline void bch2_btree_path_verify_locks(struct btree_trans *trans,
+						struct btree_path *path)
 {
 	if (static_branch_unlikely(&bch2_debug_check_btree_locking))
-		__bch2_btree_path_verify_locks(path);
+		__bch2_btree_path_verify_locks(trans, path);
 }
 
 static inline void bch2_trans_verify_locks(struct btree_trans *trans)
-- 
2.25.1
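For context: this patch only plumbs the btree_trans pointer through btree_path_set_dirty() and the lock-verification helpers; it adds no new checks itself. Below is a minimal sketch of the kind of assert the plumbing enables, assuming the existing trans->locked, trans->restarted and path->should_be_locked fields and bcachefs's EBUG_ON() (compiled out unless debug checks are enabled). The specific condition is illustrative only and is not part of this patch.

static inline void btree_path_set_dirty(struct btree_trans *trans,
					struct btree_path *path,
					enum btree_path_uptodate u)
{
	/*
	 * Illustrative only: with trans available here, we could assert that
	 * a path the transaction still depends on (should_be_locked) isn't
	 * being dirtied while the transaction believes it holds its locks
	 * and hasn't been restarted:
	 */
	EBUG_ON(path->should_be_locked && trans->locked && !trans->restarted);

	path->uptodate = max_t(unsigned, path->uptodate, u);
}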