bcachefs: Fix bch2_btree_node_fill() for !path
Author: Kent Overstreet <kent.overstreet@linux.dev>
Fri, 12 Apr 2024 19:54:33 +0000 (15:54 -0400)
Committer: Kent Overstreet <kent.overstreet@linux.dev>
Mon, 15 Apr 2024 00:02:11 +0000 (20:02 -0400)
We shouldn't be doing the unlock/relock dance when we're not using a
path - this fixes an assertion pop when called from btree node scan.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_cache.c

index c347d08d80bcf89e3adc3e035288579ff8baa94a..02c70e813face0ce975f1f700e55a34743d286ea 100644 (file)
@@ -709,7 +709,6 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
-       u32 seq;
 
        if (unlikely(level >= BTREE_MAX_DEPTH)) {
                int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
@@ -775,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        }
 
        set_btree_node_read_in_flight(b);
-
        six_unlock_write(&b->c.lock);
-       seq = six_lock_seq(&b->c.lock);
-       six_unlock_intent(&b->c.lock);
 
-       /* Unlock before doing IO: */
-       if (path && sync)
-               bch2_trans_unlock_noassert(trans);
-
-       bch2_btree_node_read(trans, b, sync);
+       if (path) {
+               u32 seq = six_lock_seq(&b->c.lock);
 
-       if (!sync)
-               return NULL;
+               /* Unlock before doing IO: */
+               six_unlock_intent(&b->c.lock);
+               bch2_trans_unlock_noassert(trans);
 
-       if (path) {
-               int ret = bch2_trans_relock(trans) ?:
-                       bch2_btree_path_relock_intent(trans, path);
-               if (ret) {
-                       BUG_ON(!trans->restarted);
-                       return ERR_PTR(ret);
-               }
-       }
+               bch2_btree_node_read(trans, b, sync);
 
-       if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-               BUG_ON(!path);
+               if (!sync)
+                       return NULL;
 
-               trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
-               return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+               if (!six_relock_type(&b->c.lock, lock_type, seq))
+                       b = NULL;
+       } else {
+               bch2_btree_node_read(trans, b, sync);
+               if (lock_type == SIX_LOCK_read)
+                       six_lock_downgrade(&b->c.lock);
        }
 
        return b;
@@ -1135,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
-       struct btree *b;
 
        BUG_ON(path && !btree_node_locked(path, level + 1));
        BUG_ON(level >= BTREE_MAX_DEPTH);
 
-       b = btree_cache_find(bc, k);
+       struct btree *b = btree_cache_find(bc, k);
        if (b)
                return 0;
 
        b = bch2_btree_node_fill(trans, path, k, btree_id,
                                 level, SIX_LOCK_read, false);
-       return PTR_ERR_OR_ZERO(b);
+       if (!IS_ERR_OR_NULL(b))
+               six_unlock_read(&b->c.lock);
+       return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
 }
 
 void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)