if (!trans->restarted &&
(need_resched() ||
time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
- drop_locks_do(trans, (cond_resched(), 0));
+ bch2_trans_unlock(trans);
+ cond_resched();
now = local_clock();
}
trans->last_begin_time = now;
bch2_trans_srcu_unlock(trans);
trans->last_begin_ip = _RET_IP_;
+ trans->locked = true;
+
if (trans->restarted) {
bch2_btree_path_traverse_all(trans);
trans->notrace_relock_fail = false;
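The dropped drop_locks_do() did not just unlock: per its btree_locking.h definition (quoted roughly, as a sketch), it relocks as soon as the expression finishes:

#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	_do ?: bch2_trans_relock(_trans);				\
})

Open-coding the unlock means bch2_trans_begin() now stays unlocked across the cond_resched() and only becomes locked again at the explicit trans->locked = true above; since a restarted transaction re-traverses all its paths anyway, the eager relock appears to have been unnecessary work.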
BUG_ON(pos_task &&
pid == pos_task->pid &&
- bch2_trans_locked(pos));
+ pos->locked);
if (pos_task && pid < pos_task->pid) {
list_add_tail(&trans->list, &pos->list);
trans->last_begin_time = local_clock();
trans->fn_idx = fn_idx;
trans->locking_wait.task = current;
+ trans->locked = true;
trans->journal_replay_not_finished =
unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
atomic_inc_not_zero(&c->journal_keys.ref);
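With __bch2_trans_get() setting the flag at init, the BUG_ON above can check pos->locked on another thread's transaction directly instead of walking its paths with bch2_trans_locked(). The whole patch rests on two fields in struct btree_trans; a sketch of them (names taken from this diff, exact types and placement assumed):

struct btree_trans {
	...
	bool		locked;		/* set by begin()/relock(), cleared by unlock() */
	unsigned long	last_unlock_ip;	/* _RET_IP_ of the most recent unlock */
	...
};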
if (path->uptodate == BTREE_ITER_NEED_RELOCK)
path->uptodate = BTREE_ITER_UPTODATE;
- bch2_trans_verify_locks(trans);
-
return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
struct get_locks_fail f;
- return btree_path_get_locks(trans, path, false, &f);
+ bool ret = btree_path_get_locks(trans, path, false, &f);
+ bch2_trans_verify_locks(trans);
+ return ret;
}
int __bch2_btree_path_relock(struct btree_trans *trans,
path->locks_want = new_locks_want;
- return btree_path_get_locks(trans, path, true, f);
+ bool ret = btree_path_get_locks(trans, path, true, f);
+ bch2_trans_verify_locks(trans);
+ return ret;
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
- if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
- return true;
+ bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
+ if (ret)
+ goto out;
/*
* XXX: this is ugly - we'd prefer to not be mucking with other
btree_path_get_locks(trans, linked, true, NULL);
}
}
-
- return false;
+out:
+ bch2_trans_verify_locks(trans);
+ return ret;
}
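Net effect of the hunks since btree_path_get_locks(): the helper no longer calls bch2_trans_verify_locks() itself, and each entry point that uses it (bch2_btree_path_relock_norestart(), bch2_btree_path_upgrade_noupgrade_sibs(), __bch2_btree_path_upgrade()) verifies exactly once on the way out. The sibling loop above invokes btree_path_get_locks() repeatedly while the transaction is mid-upgrade, so a per-call verify would at minimum be redundant. Sketched with hypothetical names, the shared shape is:

	bool ret = do_locking_work(trans, path);	/* may run with trans in a transient state */
	bch2_trans_verify_locks(trans);			/* single postcondition per entry point */
	return ret;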
static int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
if (unlikely(trans->restarted))
return -((int) trans->restarted);
+ if (unlikely(trans->locked))
+ goto out;
trans_for_each_path(trans, path, i) {
struct get_locks_fail f;
return bch2_trans_relock_fail(trans, path, &f, trace);
}
+ trans->locked = true;
+out:
bch2_trans_verify_locks(trans);
return 0;
}
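Relock is now also idempotent: an already-locked transaction takes the early goto out and skips the path walk entirely. A hypothetical caller sketch (not code from this patch) of the contract these changes enforce:

	bch2_trans_unlock(trans);		/* clears trans->locked, records _RET_IP_ */

	ret = do_blocking_work();		/* hypothetical blocking section */
	if (ret)
		return ret;

	ret = bch2_trans_relock(trans);		/* required before touching any path again */
	if (ret)
		return ret;			/* a transaction_restart error */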
void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
__bch2_trans_unlock(trans);
+
+ trans->locked = false;
+ trans->last_unlock_ip = _RET_IP_;
}
void bch2_trans_unlock(struct btree_trans *trans)
{
__bch2_trans_unlock(trans);
+
+ trans->locked = false;
+ trans->last_unlock_ip = _RET_IP_;
}
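Both unlock variants record where they were called from, so an assertion that later finds the transaction unexpectedly unlocked can name the culprit call site. A hypothetical report using the breadcrumb (not part of this patch):

	if (WARN_ON(!trans->locked))
		printk(KERN_ERR "btree trans unlocked at %pS\n",
		       (void *) trans->last_unlock_ip);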
void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}
-bool bch2_trans_locked(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (path->nodes_locked)
- return true;
- return false;
-}
-
int __bch2_trans_mutex_lock(struct btree_trans *trans,
struct mutex *lock)
{
}
}
+static bool bch2_trans_locked(struct btree_trans *trans)
+{
+ struct btree_path *path;
+ unsigned i;
+
+ trans_for_each_path(trans, path, i)
+ if (path->nodes_locked)
+ return true;
+ return false;
+}
+
void bch2_trans_verify_locks(struct btree_trans *trans)
{
struct btree_path *path;
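With bch2_trans_locked() demoted to a static helper, trans->locked becomes the authoritative state and the path walk survives only as a cross-check inside bch2_trans_verify_locks(). A standalone model of the invariant in plain userspace C (assumed semantics, not bcachefs code):

#include <assert.h>
#include <stdbool.h>

struct path  { int nodes_locked; };			/* stand-in for btree_path */
struct trans { bool locked; struct path paths[4]; };	/* stand-in for btree_trans */

/* mirrors the static bch2_trans_locked() above: does any path hold node locks? */
static bool any_path_locked(struct trans *t)
{
	for (int i = 0; i < 4; i++)
		if (t->paths[i].nodes_locked)
			return true;
	return false;
}

/* the invariant: the flag may never claim "unlocked" while a path holds locks */
static void verify_locks(struct trans *t)
{
	if (!t->locked)
		assert(!any_path_locked(t));
}

int main(void)
{
	struct trans t = { .locked = true };

	t.paths[0].nodes_locked = 1;	/* take a lock while marked locked: ok */
	verify_locks(&t);

	t.paths[0].nodes_locked = 0;	/* drop locks, then clear the flag... */
	t.locked = false;		/* ...as bch2_trans_unlock() does */
	verify_locks(&t);

	return 0;
}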