bcachefs: Don't block on reclaim_lock from journal_res_get
authorKent Overstreet <kent.overstreet@gmail.com>
Thu, 28 Feb 2019 19:22:52 +0000 (14:22 -0500)
committerKent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:17 +0000 (17:08 -0400)
When we're doing btree updates from journal flush, blocking on
reclaim_lock in journal_res_get becomes a lock inversion; take the
lock with mutex_trylock() there instead.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/journal.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/journal_reclaim.h

index 5caa01881d0043656e2ad6e77349526c95b13cce..ba6adf11ef4261fa3bdab69de54224d72ae37243 100644 (file)
@@ -390,7 +390,10 @@ retry:
                                goto retry;
                        }
 
-                       bch2_journal_reclaim_work(&j->reclaim_work.work);
+                       if (mutex_trylock(&j->reclaim_lock)) {
+                               bch2_journal_reclaim(j);
+                               mutex_unlock(&j->reclaim_lock);
+                       }
                }
 
                ret = -EAGAIN;
index 0884fc823cdf27aaf193e448472f15e32949195f..a3c53b78ad10a5dcc14020d072ca4a91efc5b030 100644 (file)
@@ -433,7 +433,7 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
 }
 
 /**
- * bch2_journal_reclaim_work - free up journal buckets
+ * bch2_journal_reclaim - free up journal buckets
  *
  * Background journal reclaim writes out btree nodes. It should be run
  * early enough so that we never completely run out of journal buckets.
@@ -450,18 +450,17 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
  * 512 journal entries or 25% of all journal buckets, then
  * journal_next_bucket() should not stall.
  */
-void bch2_journal_reclaim_work(struct work_struct *work)
+void bch2_journal_reclaim(struct journal *j)
 {
-       struct bch_fs *c = container_of(to_delayed_work(work),
-                               struct bch_fs, journal.reclaim_work);
-       struct journal *j = &c->journal;
+       struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned iter, bucket_to_flush, min_nr = 0;
        u64 seq_to_flush = 0;
 
+       lockdep_assert_held(&j->reclaim_lock);
+
        bch2_journal_do_discards(j);
 
-       mutex_lock(&j->reclaim_lock);
        spin_lock(&j->lock);
 
        for_each_rw_member(ca, c, iter) {
@@ -493,13 +492,21 @@ void bch2_journal_reclaim_work(struct work_struct *work)
 
        journal_flush_pins(j, seq_to_flush, min_nr);
 
-       mutex_unlock(&j->reclaim_lock);
-
        if (!test_bit(BCH_FS_RO, &c->flags))
                queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,
                                   msecs_to_jiffies(j->reclaim_delay_ms));
 }
 
+void bch2_journal_reclaim_work(struct work_struct *work)
+{
+       struct journal *j = container_of(to_delayed_work(work),
+                               struct journal, reclaim_work);
+
+       mutex_lock(&j->reclaim_lock);
+       bch2_journal_reclaim(j);
+       mutex_unlock(&j->reclaim_lock);
+}
+
 static int journal_flush_done(struct journal *j, u64 seq_to_flush)
 {
        int ret;
index 71545ad3bd58c62d5a17c42a9013fc7c9cd60838..9bf982a177971fbf917dc26ac1f60d300d9c0ac5 100644 (file)
@@ -42,6 +42,7 @@ void bch2_journal_pin_add_if_older(struct journal *,
 void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
 
 void bch2_journal_do_discards(struct journal *);
+void bch2_journal_reclaim(struct journal *);
 void bch2_journal_reclaim_work(struct work_struct *);
 
 void bch2_journal_flush_pins(struct journal *, u64);