bcachefs: Kill remaining bch2_btree_iter_unlock() uses
author Kent Overstreet <kent.overstreet@gmail.com>
Fri, 10 May 2019 20:09:17 +0000 (16:09 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:21 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_update_interior.c
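
The call-site conversion the diff below applies is mechanical. A minimal sketch of the before/after pattern, for illustration only (it assumes the bcachefs btree headers; example_unlock() is a made-up name, not a function in the tree):

	static void example_unlock(struct btree_iter *iter)
	{
		struct btree_trans *trans = iter->trans;

		/*
		 * Old: bch2_btree_iter_unlock(iter) walked every iterator
		 * linked into iter->trans, unlocked each one, and returned
		 * the error stashed in the iterator.
		 *
		 * New: the unlock goes through the transaction directly;
		 * a stashed error now surfaces from bch2_trans_exit()
		 * instead (see the comment update in btree_iter.c below).
		 */
		bch2_btree_trans_unlock(trans);
	}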

diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index afede965102487990d443f3f606d753e4df50a02..fbf5f809e6ac300038237f0188d28e240a26e8a3 100644
@@ -389,16 +389,6 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
        bch2_btree_trans_verify_locks(iter->trans);
 }
 
-int bch2_btree_iter_unlock(struct btree_iter *iter)
-{
-       struct btree_iter *linked;
-
-       trans_for_each_iter(iter->trans, linked)
-               __bch2_btree_iter_unlock(linked);
-
-       return btree_iter_err(iter);
-}
-
 bool bch2_btree_trans_relock(struct btree_trans *trans)
 {
        struct btree_iter *iter;
@@ -1041,7 +1031,7 @@ static unsigned btree_iter_up_until_locked(struct btree_iter *iter,
  * Returns 0 on success, -EIO on error (error reading in a btree node).
  *
  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
- * stashed in the iterator and returned from bch2_btree_iter_unlock().
+ * stashed in the iterator and returned from bch2_trans_exit().
  */
 int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 {
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 171e729ed3ea4aa50d6033d70a0e54dc5bad561d..9b7dfee2da8203e4f7bce806134f760cad3ed442 100644
@@ -105,8 +105,6 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                              struct btree_node_iter *, struct bkey_packed *,
                              unsigned, unsigned);
 
-int bch2_btree_iter_unlock(struct btree_iter *);
-
 bool bch2_btree_trans_relock(struct btree_trans *);
 void bch2_btree_trans_unlock(struct btree_trans *);
 
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 73675af8743a7a425b8299d7e5db17f5ae16ebed..6d6b10502188c85ad27c7743d7d7d61af8a1aa65 100644
@@ -1551,6 +1551,7 @@ split:
 int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
                          unsigned flags)
 {
+       struct btree_trans *trans = iter->trans;
        struct btree *b = iter->l[0].b;
        struct btree_update *as;
        struct closure cl;
@@ -1561,7 +1562,7 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
         * We already have a disk reservation and open buckets pinned; this
         * allocation must not block:
         */
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                if (linked->btree_id == BTREE_ID_EXTENTS)
                        flags |= BTREE_INSERT_USE_RESERVE;
 
@@ -1573,10 +1574,10 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
                if (flags & BTREE_INSERT_NOUNLOCK)
                        return -EINTR;
 
-               bch2_btree_trans_unlock(iter->trans);
+               bch2_btree_trans_unlock(trans);
                down_read(&c->gc_lock);
 
-               if (!bch2_btree_trans_relock(iter->trans))
+               if (!bch2_btree_trans_relock(trans))
                        ret = -EINTR;
        }
 
@@ -1597,7 +1598,7 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
                ret = PTR_ERR(as);
                if (ret == -EAGAIN) {
                        BUG_ON(flags & BTREE_INSERT_NOUNLOCK);
-                       bch2_btree_iter_unlock(iter);
+                       bch2_btree_trans_unlock(trans);
                        ret = -EINTR;
                }
                goto out;
@@ -1624,6 +1625,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
                                   unsigned flags,
                                   enum btree_node_sibling sib)
 {
+       struct btree_trans *trans = iter->trans;
        struct btree_update *as;
        struct bkey_format_state new_s;
        struct bkey_format new_f;
@@ -1778,7 +1780,7 @@ err_cycle_gc_lock:
        if (flags & BTREE_INSERT_NOUNLOCK)
                goto out;
 
-       bch2_btree_iter_unlock(iter);
+       bch2_btree_trans_unlock(trans);
 
        down_read(&c->gc_lock);
        up_read(&c->gc_lock);
@@ -1794,7 +1796,7 @@ err:
 
        if ((ret == -EAGAIN || ret == -EINTR) &&
            !(flags & BTREE_INSERT_NOUNLOCK)) {
-               bch2_btree_iter_unlock(iter);
+               bch2_btree_trans_unlock(trans);
                closure_sync(&cl);
                ret = bch2_btree_iter_traverse(iter);
                if (ret)
@@ -1861,6 +1863,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
                            __le64 seq, unsigned flags)
 {
+       struct btree_trans *trans = iter->trans;
        struct closure cl;
        struct btree *b;
        int ret;
@@ -1873,7 +1876,7 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
 
        if (!(flags & BTREE_INSERT_GC_LOCK_HELD)) {
                if (!down_read_trylock(&c->gc_lock)) {
-                       bch2_btree_iter_unlock(iter);
+                       bch2_btree_trans_unlock(trans);
                        down_read(&c->gc_lock);
                }
        }
@@ -1892,7 +1895,7 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
                    ret != -EINTR)
                        break;
 
-               bch2_btree_iter_unlock(iter);
+               bch2_btree_trans_unlock(trans);
                closure_sync(&cl);
        }