	bch2_trans_srcu_unlock(trans);
}
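+/*
+ * Drop all write locks held by the transaction: walk every path and, at
+ * each btree level, write-unlock any node the path has write-locked.
+ */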
+void bch2_trans_unlock_write(struct btree_trans *trans)
+{
+	struct btree_path *path;
+	unsigned i;
+
+	trans_for_each_path(trans, path, i)
+		for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++)
+			if (btree_node_write_locked(path, l))
+				bch2_btree_node_unlock_write(trans, path, path->l[l].b);
+}
+
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
void bch2_trans_unlock_noassert(struct btree_trans *);
+void bch2_trans_unlock_write(struct btree_trans *);
static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}
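+/*
+ * Unlock only the nodes write-locked by this transaction's pending
+ * updates; unlike bch2_trans_unlock_write(), this does not walk every
+ * path.
+ */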
-static inline void bch2_trans_unlock_write(struct btree_trans *trans)
+static inline void bch2_trans_unlock_updates_write(struct btree_trans *trans)
{
	if (likely(trans->write_locked)) {
		trans_for_each_update(trans, i)
	struct bkey_i *new_k;
	int ret;
-	bch2_trans_unlock_write(trans);
+	bch2_trans_unlock_updates_write(trans);
	bch2_trans_unlock(trans);
	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
	if (!ret && unlikely(trans->journal_replay_not_finished))
		bch2_drop_overwrites_from_journal(trans);
-	bch2_trans_unlock_write(trans);
+	bch2_trans_unlock_updates_write(trans);
	if (!ret && trans->journal_pin)
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,