bcachefs: bch2_btree_node_write_trans()
authorKent Overstreet <kent.overstreet@linux.dev>
Sat, 21 Dec 2024 08:31:00 +0000 (03:31 -0500)
committerKent Overstreet <kent.overstreet@linux.dev>
Fri, 10 Jan 2025 04:38:41 +0000 (23:38 -0500)
Add a btree_trans-aware version of bch2_btree_node_write(): cycling a node's write lock for post-write cleanup bumps the six lock's sequence number, so every btree_path pointing at that node needs its cached lock_seq bumped to match - avoid screwing up path->lock_seq.
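
Illustrative sketch (not part of this patch) of why the cached sequence
numbers matter; path_seq_still_valid() is a hypothetical helper, but the
fields and six_lock_seq() are the ones used in the hunks below:

  static inline bool path_seq_still_valid(struct btree_path *path, struct btree *b)
  {
          /*
           * A path caches the six lock's sequence number when it locks a
           * node; relocking only succeeds while the cached value still
           * matches, so anything that cycles b->c.lock must also bump
           * path->lock_seq on every path referencing the node:
           */
          return path->l[b->c.level].lock_seq == six_lock_seq(&b->c.lock);
  }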

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_io.c
fs/bcachefs/btree_io.h
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_update_interior.c

index d99f8a78d286d9d478a17b15b5caf93ca7ae6d47..e371e60e3133ef53e4ba7e203cc8cd0dcdab15ab 100644 (file)
@@ -489,8 +489,8 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
        if (b->nsets == MAX_BSETS &&
            !btree_node_write_in_flight(b) &&
            should_compact_all(c, b)) {
-               bch2_btree_node_write(c, b, SIX_LOCK_write,
-                                     BTREE_WRITE_init_next_bset);
+               bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
+                                           BTREE_WRITE_init_next_bset);
                reinit_iter = true;
        }
 
@@ -2345,6 +2345,34 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
        }
 }
 
+void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
+                                enum six_lock_type lock_type_held,
+                                unsigned flags)
+{
+       struct bch_fs *c = trans->c;
+
+       if (lock_type_held == SIX_LOCK_intent ||
+           (lock_type_held == SIX_LOCK_read &&
+            six_lock_tryupgrade(&b->c.lock))) {
+               __bch2_btree_node_write(c, b, flags);
+
+               /* don't cycle lock unnecessarily: */
+               if (btree_node_just_written(b) &&
+                   six_trylock_write(&b->c.lock)) {
+                       bch2_btree_post_write_cleanup(c, b);
+                       __bch2_btree_node_unlock_write(trans, b);
+               }
+
+               if (lock_type_held == SIX_LOCK_read)
+                       six_lock_downgrade(&b->c.lock);
+       } else {
+               __bch2_btree_node_write(c, b, flags);
+               if (lock_type_held == SIX_LOCK_write &&
+                   btree_node_just_written(b))
+                       bch2_btree_post_write_cleanup(c, b);
+       }
+}
+
 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
 {
        struct bucket_table *tbl;
index 9b01ca3de90776b2838bcf8db656960e04a5d65c..6f9e4a6dacf784721422f582367ebd61252e0fd7 100644 (file)
@@ -144,11 +144,13 @@ enum btree_write_flags {
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
                           enum six_lock_type, unsigned);
+void bch2_btree_node_write_trans(struct btree_trans *, struct btree *,
+                                enum six_lock_type, unsigned);
 
-static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
+static inline void btree_node_write_if_need(struct btree_trans *trans, struct btree *b,
                                            enum six_lock_type lock_held)
 {
-       bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
+       bch2_btree_node_write_trans(trans, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
 }
 
 bool bch2_btree_flush_all_reads(struct bch_fs *);
index 7474ab6ce0191a4cd276dc22f65bf6aed6e503b9..fb3d04ddcb409e86a4827e1da5af5451b77aa78a 100644 (file)
@@ -163,22 +163,27 @@ static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
  * succeed:
  */
 static inline void
-bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
-                                    struct btree *b)
+__bch2_btree_node_unlock_write(struct btree_trans *trans, struct btree *b)
 {
        struct btree_path *linked;
        unsigned i;
 
+       trans_for_each_path_with_node(trans, b, linked, i)
+               linked->l[b->c.level].lock_seq++;
+
+       six_unlock_write(&b->c.lock);
+}
+
+static inline void
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
+                                    struct btree *b)
+{
        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
        EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
        mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
-
-       trans_for_each_path_with_node(trans, b, linked, i)
-               linked->l[b->c.level].lock_seq++;
-
-       six_unlock_write(&b->c.lock);
+       __bch2_btree_node_unlock_write(trans, b);
 }
 
 void bch2_btree_node_unlock_write(struct btree_trans *,
index c3a3bfd11e8cee6c0c42204cd1650942be148cae..2f1dd516318e167208688a645cfa15cb15b34e54 100644 (file)
@@ -249,7 +249,7 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
                new |= 1 << BTREE_NODE_need_write;
        } while (!try_cmpxchg(&b->flags, &old, new));
 
-       btree_node_write_if_need(c, b, SIX_LOCK_read);
+       btree_node_write_if_need(trans, b, SIX_LOCK_read);
        six_unlock_read(&b->c.lock);
 
        bch2_trans_put(trans);
index 03a6eba7403dd6162301d07561f707aec4e16249..76c8602601dd6e3d45d9a89e54fdff065e0bca7e 100644 (file)
@@ -803,7 +803,7 @@ err:
                mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
                six_unlock_write(&b->c.lock);
 
-               btree_node_write_if_need(c, b, SIX_LOCK_intent);
+               btree_node_write_if_need(trans, b, SIX_LOCK_intent);
                btree_node_unlock(trans, path, b->c.level);
                bch2_path_put(trans, path_idx, true);
        }
@@ -824,7 +824,7 @@ err:
                b = as->new_nodes[i];
 
                btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-               btree_node_write_if_need(c, b, SIX_LOCK_read);
+               btree_node_write_if_need(trans, b, SIX_LOCK_read);
                six_unlock_read(&b->c.lock);
        }
 
@@ -1709,14 +1709,14 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
 
        if (n3) {
                bch2_btree_update_get_open_buckets(as, n3);
-               bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
+               bch2_btree_node_write_trans(trans, n3, SIX_LOCK_intent, 0);
        }
        if (n2) {
                bch2_btree_update_get_open_buckets(as, n2);
-               bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
+               bch2_btree_node_write_trans(trans, n2, SIX_LOCK_intent, 0);
        }
        bch2_btree_update_get_open_buckets(as, n1);
-       bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
+       bch2_btree_node_write_trans(trans, n1, SIX_LOCK_intent, 0);
 
        /*
         * The old node must be freed (in memory) _before_ unlocking the new
@@ -1911,7 +1911,7 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
        BUG_ON(ret);
 
        bch2_btree_update_get_open_buckets(as, n);
-       bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+       bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0);
        bch2_trans_node_add(trans, path, n);
        six_unlock_intent(&n->c.lock);
 
@@ -2104,7 +2104,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
        bch2_trans_verify_paths(trans);
 
        bch2_btree_update_get_open_buckets(as, n);
-       bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+       bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0);
 
        bch2_btree_node_free_inmem(trans, trans->paths + path, b);
        bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m);
@@ -2181,7 +2181,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
        bch2_btree_interior_update_will_free_node(as, b);
 
        bch2_btree_update_get_open_buckets(as, n);
-       bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+       bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0);
 
        bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);