bcachefs: btree_iter_set_dontneed()
author: Kent Overstreet <kent.overstreet@gmail.com>
Sat, 20 Mar 2021 02:54:18 +0000 (22:54 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:56 +0000 (17:08 -0400)
This is a bit clearer than using bch2_btree_iter_free().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_key_cache.c

index 711734f2023bc0eb9379b48f41f3069e733a3060..02a486e83881dd91e26b2afcd452fbe4360f94ce 100644 (file)
@@ -1972,7 +1972,7 @@ int bch2_trans_iter_free(struct btree_trans *trans,
        if (IS_ERR_OR_NULL(iter))
                return 0;
 
-       trans->iters_touched &= ~(1ULL << iter->idx);
+       set_btree_iter_dontneed(trans, iter);
 
        return bch2_trans_iter_put(trans, iter);
 }
@@ -2133,7 +2133,7 @@ struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
         * We don't need to preserve this iter since it's cheap to copy it
         * again - this will cause trans_iter_put() to free it right away:
         */
-       trans->iters_touched &= ~(1ULL << iter->idx);
+       set_btree_iter_dontneed(trans, iter);
 
        return iter;
 }
index 76f0f8f3c12557210282aec73b68b164e1965c67..c839bfe6ffa48d77a0652f80bb5cd6b0e1f5ce66 100644 (file)
@@ -300,6 +300,11 @@ static inline bool btree_iter_keep(struct btree_trans *trans, struct btree_iter
                (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
 }
 
+static inline void set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
+{
+       trans->iters_touched &= ~(1ULL << iter->idx);
+}
+
 #define TRANS_RESET_NOTRAVERSE         (1 << 0)
 
 void bch2_trans_reset(struct btree_trans *, unsigned);
index 76f19f86c8adbbe5d5428be45dd30b9888d260e5..d7b4df4cff17714a803b31cacd14c5e281cb91a2 100644 (file)
@@ -172,23 +172,21 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                                   ck->key.pos, BTREE_ITER_SLOTS);
        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
-       if (ret) {
-               bch2_trans_iter_put(trans, iter);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (!bch2_btree_node_relock(ck_iter, 0)) {
-               bch2_trans_iter_put(trans, iter);
                trace_transaction_restart_ip(trans->ip, _THIS_IP_);
-               return -EINTR;
+               ret = -EINTR;
+               goto err;
        }
 
        if (k.k->u64s > ck->u64s) {
                new_u64s = roundup_pow_of_two(k.k->u64s);
                new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
                if (!new_k) {
-                       bch2_trans_iter_put(trans, iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err;
                }
        }
 
@@ -204,9 +202,10 @@ static int btree_key_cache_fill(struct btree_trans *trans,
        bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
 
        /* We're not likely to need this iterator again: */
-       bch2_trans_iter_free(trans, iter);
-
-       return 0;
+       set_btree_iter_dontneed(trans, iter);
+err:
+       bch2_trans_iter_put(trans, iter);
+       return ret;
 }
 
 static int bkey_cached_check_fn(struct six_lock *lock, void *p)