bcachefs: bch2_trans_unlock_write()
author: Kent Overstreet <kent.overstreet@linux.dev>
Tue, 24 Dec 2024 10:40:17 +0000 (05:40 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Fri, 10 Jan 2025 04:38:42 +0000 (23:38 -0500)
New helper for dropping all write locks; which is distinct from the
helper the transaction commit path uses, which is faster and only
touches updates.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_trans_commit.c

index b339c209345a2afff992aede4e0f8e4e0bd8bf80..8503931463d1f53f5223e31dcc2ef787e83f8345 100644 (file)
@@ -818,6 +818,17 @@ void bch2_trans_unlock_long(struct btree_trans *trans)
        bch2_trans_srcu_unlock(trans);
 }
 
+void bch2_trans_unlock_write(struct btree_trans *trans)
+{
+       struct btree_path *path;
+       unsigned i;
+
+       trans_for_each_path(trans, path, i)
+               for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++)
+                       if (btree_node_write_locked(path, l))
+                               bch2_btree_node_unlock_write(trans, path, path->l[l].b);
+}
+
 int __bch2_trans_mutex_lock(struct btree_trans *trans,
                            struct mutex *lock)
 {
index 80f1770781010ebaa542e50896217e67ad31ae05..b54ef48eb8cc2728b14238b5bd7fe5ebeebd25d9 100644 (file)
@@ -16,6 +16,7 @@
 void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
 
 void bch2_trans_unlock_noassert(struct btree_trans *);
+void bch2_trans_unlock_write(struct btree_trans *);
 
 static inline bool is_btree_node(struct btree_path *path, unsigned l)
 {
index 2f1dd516318e167208688a645cfa15cb15b34e54..6b79b672e0b1b1cb731c19c1e4d3d967d1471d7a 100644 (file)
@@ -133,7 +133,7 @@ static inline int bch2_trans_lock_write(struct btree_trans *trans)
        return 0;
 }
 
-static inline void bch2_trans_unlock_write(struct btree_trans *trans)
+static inline void bch2_trans_unlock_updates_write(struct btree_trans *trans)
 {
        if (likely(trans->write_locked)) {
                trans_for_each_update(trans, i)
@@ -384,7 +384,7 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
        struct bkey_i *new_k;
        int ret;
 
-       bch2_trans_unlock_write(trans);
+       bch2_trans_unlock_updates_write(trans);
        bch2_trans_unlock(trans);
 
        new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
@@ -868,7 +868,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
        if (!ret && unlikely(trans->journal_replay_not_finished))
                bch2_drop_overwrites_from_journal(trans);
 
-       bch2_trans_unlock_write(trans);
+       bch2_trans_unlock_updates_write(trans);
 
        if (!ret && trans->journal_pin)
                bch2_journal_pin_add(&c->journal, trans->journal_res.seq,