bcachefs: kill bch2_btree_key_cache_flush()
[linux-block.git] / fs / bcachefs / btree_key_cache.c
index 37fbf22de8fcba305d717f41e4ae8a9461502d53..7d2db1e5df3bff4f083025d6b08974e65a829a1c 100644 (file)
@@ -645,22 +645,27 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
        if (journal_seq && ck->journal.seq != journal_seq)
                goto out;
 
+       trans->journal_res.seq = ck->journal.seq;
+
        /*
-        * Since journal reclaim depends on us making progress here, and the
-        * allocator/copygc depend on journal reclaim making progress, we need
-        * to be using alloc reserves:
+        * If we're at the end of the journal, we really want to free up space
+        * in the journal right away - we don't want to pin that old journal
+        * sequence number with a new btree node write, we want to re-journal
+        * the update
         */
+       if (ck->journal.seq == journal_last_seq(j))
+               commit_flags |= BCH_WATERMARK_reclaim;
+       else
+               commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
+
        ret   = bch2_btree_iter_traverse(&b_iter) ?:
                bch2_trans_update(trans, &b_iter, ck->k,
                                  BTREE_UPDATE_KEY_CACHE_RECLAIM|
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                  BTREE_TRIGGER_NORUN) ?:
                bch2_trans_commit(trans, NULL, NULL,
-                                 BTREE_INSERT_NOCHECK_RW|
-                                 BTREE_INSERT_NOFAIL|
-                                 (ck->journal.seq == journal_last_seq(j)
-                                  ? BCH_WATERMARK_reclaim
-                                  : 0)|
+                                 BCH_TRANS_COMMIT_no_check_rw|
+                                 BCH_TRANS_COMMIT_no_enospc|
                                  commit_flags);
 
        bch2_fs_fatal_err_on(ret &&
@@ -732,9 +737,9 @@ int bch2_btree_key_cache_journal_flush(struct journal *j,
        }
        six_unlock_read(&ck->c.lock);
 
-       ret = commit_do(trans, NULL, NULL, 0,
+       ret = lockrestart_do(trans,
                btree_key_cache_flush_pos(trans, key, seq,
-                               BTREE_INSERT_JOURNAL_RECLAIM, false));
+                               BCH_TRANS_COMMIT_journal_reclaim, false));
 unlock:
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
 
@@ -742,22 +747,6 @@ unlock:
        return ret;
 }
 
-/*
- * Flush and evict a key from the key cache:
- */
-int bch2_btree_key_cache_flush(struct btree_trans *trans,
-                              enum btree_id id, struct bpos pos)
-{
-       struct bch_fs *c = trans->c;
-       struct bkey_cached_key key = { id, pos };
-
-       /* Fastpath - assume it won't be found: */
-       if (!bch2_btree_key_cache_find(c, id, pos))
-               return 0;
-
-       return btree_key_cache_flush_pos(trans, key, 0, 0, true);
-}
-
 bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  unsigned flags,
                                  struct btree_insert_entry *insert_entry)
@@ -773,7 +762,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
        ck->valid = true;
 
        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
-               EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+               EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
                set_bit(BKEY_CACHED_DIRTY, &ck->flags);
                atomic_long_inc(&c->btree_key_cache.nr_dirty);
 
@@ -992,8 +981,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
        list_for_each_entry_safe(ck, n, &items, list) {
                cond_resched();
 
-               bch2_journal_pin_drop(&c->journal, &ck->journal);
-
                list_del(&ck->list);
                kfree(ck->k);
                six_lock_exit(&ck->c.lock);
@@ -1002,7 +989,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 
        if (atomic_long_read(&bc->nr_dirty) &&
            !bch2_journal_error(&c->journal) &&
-           test_bit(BCH_FS_WAS_RW, &c->flags))
+           test_bit(BCH_FS_was_rw, &c->flags))
                panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
                      atomic_long_read(&bc->nr_dirty));