bcachefs: support btree updates of prejournaled keys
Author:     Brian Foster <bfoster@redhat.com>
AuthorDate: Wed, 19 Jul 2023 12:53:05 +0000 (08:53 -0400)
Commit:     Kent Overstreet <kent.overstreet@linux.dev>
CommitDate: Sun, 22 Oct 2023 21:10:08 +0000 (17:10 -0400)
Introduce support for prejournaled key updates. This allows a
transaction to commit an update for a key that already exists (and
is pinned) in the journal. This is required for btree write buffer
updates as the current scheme of journaling both on write buffer
insertion and write buffer (slow path) flush is unsafe in certain
crash recovery scenarios.

Create a small trans update wrapper to pass along the seq where the
key resides into the btree_insert_entry. From there, trans commit
passes the seq into the btree insert path where it is used to manage
the journal pin for the associated btree leaf.
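For illustration only, a future caller that already holds a key pinned in the
journal at a known sequence number could queue it through the new wrapper
roughly as sketched below. This is not part of the patch: the helper name,
the btree/iterator flags, and the surrounding setup are assumptions made for
the example; the caller would still commit the transaction as usual, only the
re-journaling of this particular key is skipped.

	/*
	 * Hypothetical example: update a key that is already journaled at
	 * @journal_seq without journaling it again at commit time.
	 */
	static int update_prejournaled_key(struct btree_trans *trans,
					   enum btree_id btree, u64 journal_seq,
					   struct bkey_i *k)
	{
		struct btree_iter iter;
		int ret;

		bch2_trans_iter_init(trans, &iter, btree, k->k.p, BTREE_ITER_INTENT);

		/*
		 * bch2_trans_update_seq() stashes @journal_seq in the insert
		 * entry and sets BTREE_UPDATE_NOJOURNAL|BTREE_UPDATE_PREJOURNAL,
		 * so the commit path does not journal the key again and uses
		 * @journal_seq, rather than the new commit's journal seq, when
		 * managing the journal pin for the btree leaf.
		 */
		ret =   bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update_seq(trans, journal_seq, &iter, k, 0);

		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}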

Note that this patch only introduces the underlying mechanism and
otherwise includes no functional changes.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bkey_methods.h
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update.h
fs/bcachefs/btree_update_leaf.c

fs/bcachefs/bkey_methods.h
index d1ff83a73511d8663a0551f16b4557f0ab648e2a..d7b63769068c7464e709d1724d4a53c39c3bc4f4 100644
@@ -98,6 +98,7 @@ static inline int bch2_mark_key(struct btree_trans *trans,
 enum btree_update_flags {
        __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
        __BTREE_UPDATE_NOJOURNAL,
+       __BTREE_UPDATE_PREJOURNAL,
        __BTREE_UPDATE_KEY_CACHE_RECLAIM,
 
        __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
@@ -112,6 +113,7 @@ enum btree_update_flags {
 
 #define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
 #define BTREE_UPDATE_NOJOURNAL         (1U << __BTREE_UPDATE_NOJOURNAL)
+#define BTREE_UPDATE_PREJOURNAL                (1U << __BTREE_UPDATE_PREJOURNAL)
 #define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
 
 #define BTREE_TRIGGER_NORUN            (1U << __BTREE_TRIGGER_NORUN)

fs/bcachefs/btree_types.h
index 937f9c2b63ed3ee61dbb455f4501fce7050a8c07..9bfaa15d5ad49c22d6aabcdeb2bd83ed82616cb6 100644
@@ -380,6 +380,7 @@ struct btree_insert_entry {
        u8                      old_btree_u64s;
        struct bkey_i           *k;
        struct btree_path       *path;
+       u64                     seq;
        /* key being overwritten: */
        struct bkey             old_k;
        const struct bch_val    *old_v;

fs/bcachefs/btree_update.h
index f794c9d108b86bf96a409b394f6d9847e8f9c8a9..256da97f721c9d4fbb274e70ffd9950b29d96e6e 100644
@@ -111,6 +111,8 @@ int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
 
 int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
                                   struct bkey_i *, enum btree_update_flags);
+int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
+                                      struct bkey_i *, enum btree_update_flags);
 int __must_check bch2_trans_update_buffered(struct btree_trans *,
                                            enum btree_id, struct bkey_i *);
 

fs/bcachefs/btree_update_leaf.c
index 319286294d6a9c46487c8b510318ec3378e64c3f..609780f0ce8e70c2a0b671ace8760eee424513c7 100644
@@ -747,9 +747,14 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
        trans_for_each_update(trans, i) {
                i->k->k.needs_whiteout = false;
 
-               if (!i->cached)
-                       bch2_btree_insert_key_leaf(trans, i->path, i->k, trans->journal_res.seq);
-               else if (!i->key_cache_already_flushed)
+               if (!i->cached) {
+                       u64 seq = trans->journal_res.seq;
+
+                       if (i->flags & BTREE_UPDATE_PREJOURNAL)
+                               seq = i->seq;
+
+                       bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
+               } else if (!i->key_cache_already_flushed)
                        bch2_btree_insert_key_cached(trans, flags, i);
                else {
                        bch2_btree_key_cache_drop(trans, i->path);
@@ -1571,12 +1576,21 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
 {
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i, n;
+       u64 seq = 0;
        int cmp;
 
        EBUG_ON(!path->should_be_locked);
        EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
        EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
+       /*
+        * The transaction journal res hasn't been allocated at this point.
+        * That occurs at commit time. Reuse the seq field to pass in the seq
+        * of a prejournaled key.
+        */
+       if (flags & BTREE_UPDATE_PREJOURNAL)
+               seq = trans->journal_res.seq;
+
        n = (struct btree_insert_entry) {
                .flags          = flags,
                .bkey_type      = __btree_node_type(path->level, path->btree_id),
@@ -1585,6 +1599,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                .cached         = path->cached,
                .path           = path,
                .k              = k,
+               .seq            = seq,
                .ip_allocated   = ip,
        };
 
@@ -1612,6 +1627,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                i->cached       = n.cached;
                i->k            = n.k;
                i->path         = n.path;
+               i->seq          = n.seq;
                i->ip_allocated = n.ip_allocated;
        } else {
                array_insert_item(trans->updates, trans->nr_updates,
@@ -1709,6 +1725,18 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
        return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
 }
 
+/*
+ * Add a transaction update for a key that has already been journaled.
+ */
+int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
+                                      struct btree_iter *iter, struct bkey_i *k,
+                                      enum btree_update_flags flags)
+{
+       trans->journal_res.seq = seq;
+       return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
+                                                BTREE_UPDATE_PREJOURNAL);
+}
+
 int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
                                            enum btree_id btree,
                                            struct bkey_i *k)