// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"
static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}
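
/*
 * Every btree node lock shares this one lock_class_key, and lockdep
 * validation is disabled on it: cycle detection for these locks is done by
 * the checker below instead. A minimal usage sketch (hypothetical caller,
 * not from this file):
 *
 *	bch2_btree_lock_init(&b->c, 0);
 */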
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	/* Re-enable when lock_class_is_held() is merged: */
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif
/* Btree node locking: */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}
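
/*
 * Illustrative sketch (not from this file): asking whether this transaction
 * already holds a read lock on @b via some path other than @path:
 *
 *	struct six_lock_count cnt =
 *		bch2_btree_node_lock_counts(trans, path, &b->c, level);
 *	if (cnt.n[SIX_LOCK_read])
 *		...
 */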
void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}
/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common	*node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};
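
/*
 * The graph is built as a chain: g[0] is the transaction that started the
 * deadlock check, and each later entry holds a lock the previous entry is
 * waiting on. An assumed two-transaction scenario, for illustration:
 *
 *	g[0]: trans A, waiting for an intent lock on node X (held by B)
 *	g[1]: trans B, waiting for a write lock on node Y (held by A)
 *
 * Descending from g[1] reaches A again; finding a transaction that is
 * already in the graph is what triggers break_cycle().
 */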
static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++) {
		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
		if (!task)
			continue;

		bch2_btree_trans_to_text(out, i->trans);
		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
	}
}
static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = i->trans->locking_wait.task;
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}
static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}
static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}
static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, buf.buf);
		printbuf_exit(&buf);
	}
}
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}
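
/*
 * Higher preference means a better victim for break_cycle(). Worked through
 * the cases above: a transaction whose lock attempt may not fail can never
 * be aborted (0); one waiting for a write lock is expensive to abort (1);
 * an ordinary waiter is fair game (2); and one in traverse_all, which will
 * retraverse everything anyway, is the cheapest to restart (3).
 */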
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}
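
/*
 * With SIX_LOCK_read = 0, SIX_LOCK_intent = 1 and SIX_LOCK_write = 2,
 * "t1 + t2 > 1" in lock_type_conflicts() above encodes the six lock
 * conflict matrix:
 *
 *	read   + read   = 0	no conflict
 *	read   + intent = 1	no conflict
 *	intent + intent = 2	conflict
 *	read   + write  = 2	conflict
 *	intent + write  = 3	conflict
 *	write  + write  = 4	conflict
 */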
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}
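
/*
 * Two modes of use, for illustration: the six lock callback below passes a
 * NULL printbuf, so a detected cycle actually aborts a transaction, while a
 * report-only caller (debugfs style; hypothetical here) passes a printbuf
 * just to print cycles without restarting anything:
 *
 *	struct printbuf buf = PRINTBUF;
 *	if (bch2_check_for_deadlock(trans, &buf) < 0)
 *		pr_info("%s", buf.buf);
 *	printbuf_exit(&buf);
 */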
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}
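
/*
 * On failure the node is re-marked intent locked: the inline caller marks
 * the node write locked *before* attempting the lock, so the cycle detector
 * sees the write lock we are about to take. A sketch of that fast path (it
 * lives in btree_locking.h; shown here for context, details assumed):
 *
 *	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 *	return likely(six_trylock_write(&b->lock))
 *		? 0
 *		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
 */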
void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);

	BUG_ON(ret);
}
/* relock */

static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx	= l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
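
/*
 * Callers below pass upgrade=false to retake the lock types we held before
 * (relock) and upgrade=true to take intent locks up to locks_want. On
 * failure, @f reports where things went wrong; an illustrative sketch:
 *
 *	struct get_locks_fail f;
 *	if (!btree_path_get_locks(trans, path, false, &f))
 *		...	// f.l is the level, f.b the node we failed to relock
 */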
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}
/* upgrade */

bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}
/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}
__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
	struct get_locks_fail f;

	return btree_path_get_locks(trans, path, false, &f);
}
int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned new_locks_want,
					    struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true, f);
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}

	return false;
}
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}
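
/*
 * Example effect (illustrative): with new_locks_want == 1 after a
 * successful update, intent locks on interior nodes are released and the
 * leaf's intent lock is downgraded to a read lock, so other threads can
 * take read locks on the leaf again. Callers normally go through the
 * bch2_btree_path_downgrade() wrapper, as bch2_trans_downgrade() does
 * below.
 */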
/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		if (path->ref)
			bch2_btree_path_downgrade(trans, path);
}
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i) {
		struct get_locks_fail f;

		if (path->should_be_locked &&
		    !btree_path_get_locks(trans, path, false, &f)) {
			if (trace_trans_restart_relock_enabled()) {
				struct printbuf buf = PRINTBUF;

				bch2_bpos_to_text(&buf, path->pos);
				prt_printf(&buf, " l=%u seq=%u node seq=",
					   f.l, path->l[f.l].lock_seq);
				if (IS_ERR_OR_NULL(f.b)) {
					prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
				} else {
					prt_printf(&buf, "%u", f.b->c.lock.seq);

					struct six_lock_count c =
						bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
					prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

					c = six_lock_counts(&f.b->c.lock);
					prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
				}

				trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
				printbuf_exit(&buf);
			}

			count_event(trans->c, trans_restart_relock);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	}

	return 0;
}
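
/*
 * Typical pattern around blocking work (illustrative sketch; the
 * drop_locks_do() macro in btree_locking.h packages this up):
 *
 *	bch2_trans_unlock(trans);
 *	ret = some_blocking_operation();	// hypothetical
 *	ret = ret ?: bch2_trans_relock(trans);
 *	if (ret)	// may be -BCH_ERR_transaction_restart_relock
 *		goto err;
 */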
int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path)) {
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}

	return 0;
}
void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}
void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}
void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}
bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;

	return false;
}
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}
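
/*
 * drop_locks_do() (in btree_locking.h) unlocks the transaction, evaluates
 * the expression, then relocks; so a nonzero return here means we acquired
 * the mutex but the relock failed, hence the mutex_unlock() above. The
 * trylock fast path lives in the header; roughly (sketch, details assumed):
 *
 *	static inline int bch2_trans_mutex_lock(struct btree_trans *trans,
 *						struct mutex *lock)
 *	{
 *		return mutex_trylock(lock)
 *			? 0
 *			: __bch2_trans_mutex_lock(trans, lock);
 *	}
 */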
/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif