	if (IS_ERR_OR_NULL(iter))
		return 0;
-	trans->iters_touched &= ~(1ULL << iter->idx);
+	set_btree_iter_dontneed(trans, iter);
	return bch2_trans_iter_put(trans, iter);
}
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
-	trans->iters_touched &= ~(1ULL << iter->idx);
+	set_btree_iter_dontneed(trans, iter);
	return iter;
}
		(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
}
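+/*
+ * Mark an iterator as not worth preserving: clearing its bit in
+ * iters_touched means bch2_trans_iter_put() will free it right away
+ * instead of keeping it around for reuse.
+ */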
+static inline void set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
+{
+	trans->iters_touched &= ~(1ULL << iter->idx);
+}
+
#define TRANS_RESET_NOTRAVERSE (1 << 0)
void bch2_trans_reset(struct btree_trans *, unsigned);
				   ck->key.pos, BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
-	if (ret) {
-		bch2_trans_iter_put(trans, iter);
-		return ret;
-	}
+	if (ret)
+		goto err;
	if (!bch2_btree_node_relock(ck_iter, 0)) {
-		bch2_trans_iter_put(trans, iter);
		trace_transaction_restart_ip(trans->ip, _THIS_IP_);
-		return -EINTR;
+		ret = -EINTR;
+		goto err;
	}
	if (k.k->u64s > ck->u64s) {
		new_u64s = roundup_pow_of_two(k.k->u64s);
		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
		if (!new_k) {
-			bch2_trans_iter_put(trans, iter);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err;
		}
	}
	bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
	/* We're not likely to need this iterator again: */
-	bch2_trans_iter_free(trans, iter);
-
-	return 0;
+	set_btree_iter_dontneed(trans, iter);
+err:
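+	/* The iterator is released here on both the success and error paths: */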
+	bch2_trans_iter_put(trans, iter);
+	return ret;
}
static int bkey_cached_check_fn(struct six_lock *lock, void *p)