bcachefs: bch2_trans_relock_fail() - factor out slowpath
author	Kent Overstreet <kent.overstreet@linux.dev>
Tue, 9 Apr 2024 23:45:41 +0000 (19:45 -0400)
committer	Kent Overstreet <kent.overstreet@linux.dev>
Wed, 8 May 2024 21:29:19 +0000 (17:29 -0400)
Factor out the relock slowpath (the tracepoint, event counting and
transaction restart taken when a path fails to relock) into a separate
noinline __cold helper, bch2_trans_relock_fail(); the common case becomes a
small inlinable loop in __bch2_trans_relock(), shared by bch2_trans_relock()
and bch2_trans_relock_notrace().

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
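
To make the shape of the refactor easier to see, the following is a minimal,
self-contained C sketch of the same fast-path/slow-path split (hypothetical
names and simplified types, not the bcachefs code itself): the hot relock loop
stays small and inlinable, while the rarely taken failure path, which does the
tracing and drops the locks, is pushed into a noinline __cold helper that ends
in a "restart" return.

/*
 * Minimal sketch of the fast-path/slow-path split above (hypothetical names,
 * userspace stand-ins for the kernel attributes; not the bcachefs code).
 */
#include <stdbool.h>
#include <stdio.h>

#define __cold		__attribute__((__cold__))
#define noinline	__attribute__((__noinline__))

struct xact {
	int	nr_paths;
	bool	locked[8];
};

/* Rarely taken: trace, drop every lock, tell the caller to restart. */
static noinline __cold int relock_fail(struct xact *t, int failed, bool trace)
{
	if (trace)
		fprintf(stderr, "relock failed on path %d\n", failed);

	for (int i = 0; i < t->nr_paths; i++)	/* like __bch2_trans_unlock() */
		t->locked[i] = false;

	return -1;	/* stands in for a transaction_restart error */
}

/* Hot path: one tight loop, shared by the trace and notrace entry points. */
static inline int relock(struct xact *t, bool trace)
{
	for (int i = 0; i < t->nr_paths; i++)
		if (!t->locked[i])
			return relock_fail(t, i, trace);
	return 0;
}

int main(void)
{
	struct xact t = { .nr_paths = 3, .locked = { true, false, true } };

	return relock(&t, true) ? 1 : 0;
}

Marking the helper noinline and __cold keeps it out of the caller's hot text
and lets the compiler treat the branch into it as unlikely, which is the same
reason bch2_trans_relock_fail() carries those attributes in the diff below.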
fs/bcachefs/btree_locking.c

index 8d1c4f78db5eb83c35ba1903c0c74925700b073c..e6adb2df3a571f2c216729f984fcc6bd00be4a0c 100644
@@ -723,51 +723,51 @@ void bch2_trans_downgrade(struct btree_trans *trans)
                        bch2_btree_path_downgrade(trans, path);
 }
 
-int bch2_trans_relock(struct btree_trans *trans)
+static inline void __bch2_trans_unlock(struct btree_trans *trans)
 {
        struct btree_path *path;
        unsigned i;
 
-       if (unlikely(trans->restarted))
-               return -((int) trans->restarted);
+       trans_for_each_path(trans, path, i)
+               __bch2_btree_path_unlock(trans, path);
+}
 
-       trans_for_each_path(trans, path, i) {
-               struct get_locks_fail f;
+static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
+                                                 struct get_locks_fail *f, bool trace)
+{
+       if (!trace)
+               goto out;
 
-               if (path->should_be_locked &&
-                   !btree_path_get_locks(trans, path, false, &f)) {
-                       if (trace_trans_restart_relock_enabled()) {
-                               struct printbuf buf = PRINTBUF;
-
-                               bch2_bpos_to_text(&buf, path->pos);
-                               prt_printf(&buf, " l=%u seq=%u node seq=",
-                                          f.l, path->l[f.l].lock_seq);
-                               if (IS_ERR_OR_NULL(f.b)) {
-                                       prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
-                               } else {
-                                       prt_printf(&buf, "%u", f.b->c.lock.seq);
-
-                                       struct six_lock_count c =
-                                               bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
-                                       prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-
-                                       c = six_lock_counts(&f.b->c.lock);
-                                       prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-                               }
+       if (trace_trans_restart_relock_enabled()) {
+               struct printbuf buf = PRINTBUF;
 
-                               trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
-                               printbuf_exit(&buf);
-                       }
+               bch2_bpos_to_text(&buf, path->pos);
+               prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
+               if (IS_ERR_OR_NULL(f->b)) {
+                       prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
+               } else {
+                       prt_printf(&buf, "%u", f->b->c.lock.seq);
 
-                       count_event(trans->c, trans_restart_relock);
-                       return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+                       struct six_lock_count c =
+                               bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
+                       prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+
+                       c = six_lock_counts(&f->b->c.lock);
+                       prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
                }
+
+               trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
+               printbuf_exit(&buf);
        }
 
-       return 0;
+       count_event(trans->c, trans_restart_relock);
+out:
+       __bch2_trans_unlock(trans);
+       bch2_trans_verify_locks(trans);
+       return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 }
 
-int bch2_trans_relock_notrace(struct btree_trans *trans)
+static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
 {
        struct btree_path *path;
        unsigned i;
@@ -775,30 +775,36 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
        if (unlikely(trans->restarted))
                return -((int) trans->restarted);
 
-       trans_for_each_path(trans, path, i)
+       trans_for_each_path(trans, path, i) {
+               struct get_locks_fail f;
+
                if (path->should_be_locked &&
-                   !bch2_btree_path_relock_norestart(trans, path)) {
-                       return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
-               }
+                   !btree_path_get_locks(trans, path, false, &f))
+                       return bch2_trans_relock_fail(trans, path, &f, trace);
+       }
+
+       bch2_trans_verify_locks(trans);
        return 0;
 }
 
-void bch2_trans_unlock_noassert(struct btree_trans *trans)
+int bch2_trans_relock(struct btree_trans *trans)
 {
-       struct btree_path *path;
-       unsigned i;
+       return __bch2_trans_relock(trans, true);
+}
 
-       trans_for_each_path(trans, path, i)
-               __bch2_btree_path_unlock(trans, path);
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+       return __bch2_trans_relock(trans, false);
 }
 
-void bch2_trans_unlock(struct btree_trans *trans)
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
 {
-       struct btree_path *path;
-       unsigned i;
+       __bch2_trans_unlock(trans);
+}
 
-       trans_for_each_path(trans, path, i)
-               __bch2_btree_path_unlock(trans, path);
+void bch2_trans_unlock(struct btree_trans *trans)
+{
+       __bch2_trans_unlock(trans);
 }
 
 void bch2_trans_unlock_long(struct btree_trans *trans)