#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
+#include "seqmutex.h"
#include "util.h"
#ifdef CONFIG_BCACHEFS_DEBUG
} btree_write_stats[BTREE_WRITE_TYPE_NR];
/* btree_iter.c: */
- struct mutex btree_trans_lock;
+ struct seqmutex btree_trans_lock;
struct list_head btree_trans_list;
mempool_t btree_paths_pool;
mempool_t btree_trans_mem_pool;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
struct btree_trans *pos;
- mutex_lock(&c->btree_trans_lock);
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(pos, &c->btree_trans_list, list) {
/*
* We'd much prefer to be stricter here and completely
}
list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
}
}
bch2_trans_unlock(trans);
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
closure_sync(&trans->ref);
if (s)
check_btree_paths_leaked(trans);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
- mutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- mutex_unlock(&c->btree_trans_lock);
- }
-
srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
bch2_journal_preres_put(&c->journal, &trans->journal_preres);
}
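
Note the ordering here: list_del() now happens before closure_sync(). The debug paths below take a ref on any trans they find on the list via closure_get(); unhooking the trans from the list first and only then waiting for outstanding refs to drain guarantees no reader can pick up a trans that is about to be freed. A minimal sketch of the teardown ordering, with names mirroring the patch:

	seqmutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);		/* new readers can no longer find trans */
	seqmutex_unlock(&c->btree_trans_lock);

	closure_sync(&trans->ref);	/* wait for readers already holding a ref */
	/* ...only now is it safe to tear down and free trans */
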
INIT_LIST_HEAD(&c->btree_trans_list);
- mutex_init(&c->btree_trans_lock);
+ seqmutex_init(&c->btree_trans_lock);
ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
sizeof(struct btree_path) * nr +
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
i->ret = 0;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto unlocked;
+ }
bch2_btree_trans_to_text(&i->buf, trans);
prt_newline(&i->buf);
i->iter = trans->locking_wait.task->pid;
- }
- mutex_unlock(&c->btree_trans_lock);
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+unlocked:
if (i->buf.allocation_failure)
ret = -ENOMEM;
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
if (i->iter)
goto out;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto out;
+ }
bch2_check_for_deadlock(trans, &i->buf);
i->iter = trans->locking_wait.task->pid;
+
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
}
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
out:
if (i->buf.allocation_failure)
ret = -ENOMEM;
--- /dev/null
+++ b/fs/bcachefs/seqmutex.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SEQMUTEX_H
+#define _BCACHEFS_SEQMUTEX_H
+
+#include <linux/mutex.h>
+
+/*
+ * A mutex paired with a sequence number: the seq is bumped each time
+ * the mutex is taken, so a caller that drops the lock mid-iteration
+ * can later tell, via seqmutex_relock(), whether any other thread
+ * held the lock in the meantime.
+ */
+struct seqmutex {
+ struct mutex lock;
+ u32 seq;
+};
+
+#define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
+
+static inline bool seqmutex_trylock(struct seqmutex *lock)
+{
+ if (!mutex_trylock(&lock->lock))
+ return false;
+ lock->seq++;
+ return true;
+}
+
+/*
+ * The seq is bumped when the lock is taken, not when it's released:
+ * callers sample it with seqmutex_seq() while still holding the lock,
+ * so bumping on unlock would invalidate our own sample and
+ * seqmutex_relock() could never succeed.
+ */
+static inline void seqmutex_lock(struct seqmutex *lock)
+{
+ mutex_lock(&lock->lock);
+ lock->seq++;
+}
+
+static inline void seqmutex_unlock(struct seqmutex *lock)
+{
+ mutex_unlock(&lock->lock);
+}
+
+/* Sample the current seq; must be called with the lock held. */
+static inline u32 seqmutex_seq(struct seqmutex *lock)
+{
+ return lock->seq;
+}
+
+/*
+ * Retake the lock iff no other thread has taken it since seq was
+ * sampled: returns true with the lock held, false otherwise.
+ */
+static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
+{
+ /* Cheap unlocked check first: bail if the seq has already changed */
+ if (lock->seq != seq || !mutex_trylock(&lock->lock))
+ return false;
+
+ /*
+ * Recheck under the lock: another thread may have locked (bumping
+ * the seq) between the check above and our trylock succeeding.
+ */
+ if (lock->seq != seq) {
+ mutex_unlock(&lock->lock);
+ return false;
+ }
+
+ return true;
+}
+
+#endif /* _BCACHEFS_SEQMUTEX_H */
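
For illustration, the lock-drop-relock pattern this header enables, as used by the debug.c hunks above. A minimal sketch, where struct foo, get_foo()/put_foo() and do_slow_work() are hypothetical stand-ins (the patch pins the trans with closure_get()/closure_put() and uses the task pid as the resume cursor):

	#include <linux/list.h>
	#include "seqmutex.h"

	struct foo {
		struct list_head list;	/* on the seqmutex-protected list */
		u64 id;			/* monotonic cursor for restarts */
	};

	static void walk_foos(struct seqmutex *lock, struct list_head *head)
	{
		struct foo *f;
		u64 last_id = 0;
		u32 seq;
	restart:
		seqmutex_lock(lock);
		list_for_each_entry(f, head, list) {
			if (f->id <= last_id)	/* already visited */
				continue;

			get_foo(f);		/* pin f across the unlocked section */
			seq = seqmutex_seq(lock);
			seqmutex_unlock(lock);

			do_slow_work(f);	/* may sleep, take other locks... */
			last_id = f->id;
			put_foo(f);

			/*
			 * Same seq: nobody else took the lock, so f is still
			 * on the list and iteration can continue.  Otherwise
			 * restart from the head, skipping visited entries.
			 */
			if (!seqmutex_relock(lock, seq))
				goto restart;
		}
		seqmutex_unlock(lock);
	}
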
{
struct btree_trans *trans;
- mutex_lock(&c->btree_trans_lock);
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
}
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
}
SHOW(bch2_fs)