bcachefs: Move key marking out of extents.c

author    Kent Overstreet <kent.overstreet@gmail.com>
          Mon, 5 Nov 2018 07:31:48 +0000 (02:31 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Sun, 22 Oct 2023 21:08:11 +0000 (17:08 -0400)

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

fs/bcachefs/btree_update_leaf.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/extents.c

diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 104c0b91da75c4e3453382ed76b79624b4641456..44501e98a4ac5a34e9e4e5ca28aebf2e3184d82f 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -6,6 +6,7 @@
 #include "btree_io.h"
 #include "btree_iter.h"
 #include "btree_locking.h"
+#include "buckets.h"
 #include "debug.h"
 #include "extents.h"
 #include "journal.h"
@@ -204,6 +205,8 @@ btree_insert_key_leaf(struct btree_insert *trans,
        int old_live_u64s = b->nr.live_u64s;
        int live_u64s_added, u64s_added;
 
+       bch2_mark_update(trans, insert);
+
        ret = !btree_node_is_extents(b)
                ? bch2_insert_fixup_key(trans, insert)
                : bch2_insert_fixup_extent(trans, insert);
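
The shape of the change, summarized as a call graph (an annotation for this writeup, not code from the patch). Before, only the extents path did any accounting, buried inside the fixup; after, marking is hoisted in front of the format-specific fixup for extent and non-extent btrees alike:

        /*
         * Before: accounting happened only for extents, from inside
         *         the fixup path in extents.c:
         *
         *   btree_insert_key_leaf()
         *     -> bch2_insert_fixup_extent()
         *          -> bch2_add_sectors() / bch2_subtract_sectors()
         *               -> bch2_mark_key()
         *          -> bch2_fs_usage_apply()
         *
         * After: marking runs first, for every leaf insert:
         *
         *   btree_insert_key_leaf()
         *     -> bch2_mark_update()    (marks insert + overwritten keys)
         *          -> bch2_mark_key()
         *          -> bch2_fs_usage_apply()
         *     -> bch2_insert_fixup_key() or bch2_insert_fixup_extent()
         */
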
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 54eb1b6b820b7d4bfe9e084dd099007a3a03476d..ea28788b26ddbf1d0fa98e18ca46f86f96f1518d 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -65,7 +65,9 @@
 
 #include "bcachefs.h"
 #include "alloc_background.h"
+#include "bset.h"
 #include "btree_gc.h"
+#include "btree_update.h"
 #include "buckets.h"
 #include "error.h"
 #include "movinggc.h"
@@ -346,7 +348,8 @@ void bch2_fs_usage_apply(struct bch_fs *c,
         * reservation:
         */
        should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
-       if (WARN_ON(should_not_have_added > 0)) {
+       if (WARN_ONCE(should_not_have_added > 0,
+                     "disk usage increased without a reservation")) {
                atomic64_sub(should_not_have_added, &c->sectors_available);
                added -= should_not_have_added;
        }
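
The invariant enforced here: an update must not consume more new disk space than its caller reserved, and any overage is clawed back out of c->sectors_available to keep the global counter consistent. WARN_ONCE() evaluates to its condition (like WARN_ON()), so the repair still runs on every overflow while the message prints only once. A standalone illustration of the clamp with made-up numbers (plain C, not bcachefs code):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                int64_t added             = 128;  /* sectors the update consumed */
                int64_t reserved          =  96;  /* sectors the caller reserved */
                int64_t sectors_available = 1000;

                int64_t should_not_have_added = added - reserved;
                if (should_not_have_added > 0) {
                        /* the kernel code warns here, then repairs the counters: */
                        sectors_available -= should_not_have_added;
                        added             -= should_not_have_added;
                }

                printf("added=%lld, available=%lld\n",
                       (long long)added, (long long)sectors_available);
                /* -> added=96, available=968 */
                return 0;
        }
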
@@ -642,9 +645,6 @@ static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
                             struct bch_fs_usage *stats,
                             u64 journal_seq, unsigned flags)
 {
-       unsigned replicas = bch2_extent_nr_dirty_ptrs(k);
-
-       BUG_ON(replicas && replicas - 1 > ARRAY_SIZE(stats->replicas));
        BUG_ON(!sectors);
 
        switch (k.k->type) {
@@ -653,38 +653,43 @@ static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                const union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
+               s64 cached_sectors      = 0;
+               s64 dirty_sectors       = 0;
+               unsigned replicas       = 0;
 
                extent_for_each_ptr_decode(e, p, entry) {
                        s64 disk_sectors = ptr_disk_sectors(e, p, sectors);
 
-                       /*
-                        * fs level usage (which determines free space) is in
-                        * uncompressed sectors, until copygc + compression is
-                        * sorted out:
-                        *
-                        * note also that we always update @fs_usage, even when
-                        * we otherwise wouldn't do anything because gc is
-                        * running - this is because the caller still needs to
-                        * account w.r.t. its disk reservation. It is caller's
-                        * responsibility to not apply @fs_usage if gc is in
-                        * progress.
-                        */
-                       stats->replicas
-                               [!p.ptr.cached && replicas ? replicas - 1 : 0].data
-                               [!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
-                                       disk_sectors;
-
                        bch2_mark_pointer(c, e, p, disk_sectors, data_type,
                                          stats, journal_seq, flags);
+
+                       if (!p.ptr.cached)
+                               replicas++;
+
+                       if (p.ptr.cached)
+                               cached_sectors  += disk_sectors;
+                       else
+                               dirty_sectors   += disk_sectors;
                }
+
+               replicas        = clamp_t(unsigned,     replicas,
+                                         1, ARRAY_SIZE(stats->replicas));
+
+               stats->replicas[0].data[BCH_DATA_CACHED]        += cached_sectors;
+               stats->replicas[replicas - 1].data[data_type]   += dirty_sectors;
                break;
        }
-       case BCH_RESERVATION:
-               if (replicas)
-                       stats->replicas[replicas - 1].persistent_reserved +=
-                               sectors * replicas;
+       case BCH_RESERVATION: {
+               unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
+
+               sectors *= replicas;
+               replicas = clamp_t(unsigned, replicas,
+                                  1, ARRAY_SIZE(stats->replicas));
+
+               stats->replicas[replicas - 1].persistent_reserved += sectors;
                break;
        }
+       }
 }
 
 void bch2_mark_key(struct bch_fs *c,
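
bch2_mark_extent() now accumulates per-pointer totals and touches the stats array once at the end: cached pointers are always accounted at replication level 0 under BCH_DATA_CACHED, dirty sectors at the extent's actual replication level, and the level is clamped to the array bounds instead of asserted with BUG_ON(). A standalone walk-through with made-up pointers (plain C, not bcachefs code):

        #include <stdio.h>

        struct ptr { int cached; long disk_sectors; };

        int main(void)
        {
                /* an extent with two dirty copies and one cached copy: */
                struct ptr ptrs[] = { {0, 8}, {0, 8}, {1, 8} };
                long cached_sectors = 0, dirty_sectors = 0;
                unsigned i, replicas = 0;

                for (i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++) {
                        if (ptrs[i].cached) {
                                cached_sectors += ptrs[i].disk_sectors;
                        } else {
                                replicas++;
                                dirty_sectors += ptrs[i].disk_sectors;
                        }
                }

                printf("replicas[0].data[CACHED] += %ld\n", cached_sectors);
                printf("replicas[%u].data[type]  += %ld\n", replicas - 1,
                       dirty_sectors);
                /* -> replicas[0] += 8 cached, replicas[1] += 16 dirty */
                return 0;
        }
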
@@ -748,6 +753,76 @@ void bch2_mark_key(struct bch_fs *c,
        percpu_up_read(&c->usage_lock);
 }
 
+void bch2_mark_update(struct btree_insert *trans,
+                     struct btree_insert_entry *insert)
+{
+       struct bch_fs           *c = trans->c;
+       struct btree_iter       *iter = insert->iter;
+       struct btree            *b = iter->l[0].b;
+       struct btree_node_iter  node_iter = iter->l[0].iter;
+       struct bch_fs_usage     stats = { 0 };
+       struct gc_pos           pos = gc_pos_btree_node(b);
+       struct bkey_packed      *_k;
+
+       if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
+               bch2_mark_key(c, btree_node_type(b), bkey_i_to_s_c(insert->k),
+                             true,
+                             bpos_min(insert->k->k.p, b->key.k.p).offset -
+                             bkey_start_offset(&insert->k->k),
+                             pos, &stats, trans->journal_res.seq, 0);
+
+       while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+                                                     KEY_TYPE_DISCARD))) {
+               struct bkey             unpacked;
+               struct bkey_s_c         k;
+               s64                     sectors = 0;
+
+               k = bkey_disassemble(b, _k, &unpacked);
+
+               if (btree_node_is_extents(b)
+                   ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
+                   : bkey_cmp(insert->k->k.p, k.k->p))
+                       break;
+
+               if (btree_node_is_extents(b)) {
+                       switch (bch2_extent_overlap(&insert->k->k, k.k)) {
+                       case BCH_EXTENT_OVERLAP_ALL:
+                               sectors = -((s64) k.k->size);
+                               break;
+                       case BCH_EXTENT_OVERLAP_BACK:
+                               sectors = bkey_start_offset(&insert->k->k) -
+                                       k.k->p.offset;
+                               break;
+                       case BCH_EXTENT_OVERLAP_FRONT:
+                               sectors = bkey_start_offset(k.k) -
+                                       insert->k->k.p.offset;
+                               break;
+                       case BCH_EXTENT_OVERLAP_MIDDLE:
+                               sectors = k.k->p.offset - insert->k->k.p.offset;
+                               BUG_ON(sectors <= 0);
+
+                               bch2_mark_key(c, btree_node_type(b), k,
+                                             true, sectors,
+                                             pos, &stats, trans->journal_res.seq, 0);
+
+                               sectors = bkey_start_offset(&insert->k->k) -
+                                       k.k->p.offset;
+                               break;
+                       }
+
+                       BUG_ON(sectors >= 0);
+               }
+
+               bch2_mark_key(c, btree_node_type(b), k,
+                             false, sectors,
+                             pos, &stats, trans->journal_res.seq, 0);
+
+               bch2_btree_node_iter_advance(&node_iter, b);
+       }
+
+       bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
+}
+
 /* Disk reservations: */
 
 static u64 __recalc_sectors_available(struct bch_fs *c)
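
bch2_mark_update() is the new single entry point: it marks the inserted key (skipped during journal replay via the BTREE_INSERT_JOURNAL_REPLAY check, which is why extents.c below loses its replay special cases), then walks the node iterator over every existing key the insert overlaps, computing how many sectors each one loses. The per-case deltas can be sanity-checked with a standalone sketch (plain C with made-up extents; assumes the two extents actually overlap, and models an extent as [start, end) in sectors with end == k.p.offset):

        #include <stdio.h>
        #include <stdint.h>

        struct ext { uint64_t start, end; };

        /* net change in sectors accounted to existing extent k: */
        static int64_t delta(struct ext ins, struct ext k)
        {
                if (ins.start <= k.start && ins.end >= k.end)   /* OVERLAP_ALL */
                        return -(int64_t)(k.end - k.start);
                if (ins.start > k.start && ins.end >= k.end)    /* OVERLAP_BACK */
                        return (int64_t)ins.start - (int64_t)k.end;
                if (ins.start <= k.start)                       /* OVERLAP_FRONT */
                        return (int64_t)k.start - (int64_t)ins.end;
                /*
                 * OVERLAP_MIDDLE: the patch first re-marks the surviving
                 * back half, +(k.end - ins.end), then subtracts
                 * (ins.start - k.end); the net is the overwritten middle:
                 */
                return -(int64_t)(ins.end - ins.start);
        }

        int main(void)
        {
                struct ext k = { 20, 30 };
                struct ext all = { 10, 40 }, back = { 25, 35 },
                           front = { 15, 25 }, mid = { 22, 28 };

                printf("ALL %lld BACK %lld FRONT %lld MIDDLE %lld\n",
                       (long long)delta(all, k), (long long)delta(back, k),
                       (long long)delta(front, k), (long long)delta(mid, k));
                /* -> ALL -10 BACK -5 FRONT -5 MIDDLE -6 */
                return 0;
        }
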
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index c40ffe862a0632abfe496ba0ca1a75d80dc298c6..8fe6871ad165860e45036fbe548c1aa76b59d212 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -213,6 +213,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 void bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
                   bool, s64, struct gc_pos,
                   struct bch_fs_usage *, u64, unsigned);
+void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
 
 void bch2_recalc_sectors_available(struct bch_fs *);
 
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index a7223e7c8793b0e090328b4808c35b4ea11614ab..0cf3436247934bfe2cc02a316faaa38af0083b7f 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -1009,7 +1009,6 @@ struct extent_insert_state {
        struct btree_insert             *trans;
        struct btree_insert_entry       *insert;
        struct bpos                     committed;
-       struct bch_fs_usage             stats;
 
        /* for deleting: */
        struct bkey_i                   whiteout;
@@ -1018,54 +1017,6 @@ struct extent_insert_state {
        bool                            deleting;
 };
 
-static void bch2_add_sectors(struct extent_insert_state *s,
-                            struct bkey_s_c k, u64 offset, s64 sectors)
-{
-       struct bch_fs *c = s->trans->c;
-       struct btree *b = s->insert->iter->l[0].b;
-
-       EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
-
-       if (!sectors)
-               return;
-
-       bch2_mark_key(c, BKEY_TYPE_EXTENTS, k, sectors > 0, sectors,
-                     gc_pos_btree_node(b), &s->stats,
-                     s->trans->journal_res.seq, 0);
-}
-
-static void bch2_subtract_sectors(struct extent_insert_state *s,
-                                struct bkey_s_c k, u64 offset, s64 sectors)
-{
-       bch2_add_sectors(s, k, offset, -sectors);
-}
-
-/* These wrappers subtract exactly the sectors that we're removing from @k */
-static void bch2_cut_subtract_back(struct extent_insert_state *s,
-                                 struct bpos where, struct bkey_s k)
-{
-       bch2_subtract_sectors(s, k.s_c, where.offset,
-                            k.k->p.offset - where.offset);
-       bch2_cut_back(where, k.k);
-}
-
-static void bch2_cut_subtract_front(struct extent_insert_state *s,
-                                  struct bpos where, struct bkey_s k)
-{
-       bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
-                            where.offset - bkey_start_offset(k.k));
-       __bch2_cut_front(where, k);
-}
-
-static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
-{
-       if (k.k->size)
-               bch2_subtract_sectors(s, k.s_c,
-                                    bkey_start_offset(k.k), k.k->size);
-       k.k->size = 0;
-       k.k->type = KEY_TYPE_DELETED;
-}
-
 static bool bch2_extent_merge_inline(struct bch_fs *,
                                     struct btree_iter *,
                                     struct bkey_packed *,
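
These deleted helpers were the old, extents-only accounting path; their accounting halves map one-to-one onto the overlap cases now handled centrally in bch2_mark_update() (a summary for this writeup, not code from the patch):

        /*
         * bch2_cut_subtract_front() -> BCH_EXTENT_OVERLAP_FRONT  case
         * bch2_cut_subtract_back()  -> BCH_EXTENT_OVERLAP_BACK   case
         * bch2_drop_subtract()      -> BCH_EXTENT_OVERLAP_ALL    case
         * bch2_add_sectors() on the split key
         *                           -> BCH_EXTENT_OVERLAP_MIDDLE case
         *
         * The non-accounting halves survive inline in extent_squash():
         * __bch2_cut_front(), bch2_cut_back(), and the manual
         * size = 0 / KEY_TYPE_DELETED drop.
         */
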
@@ -1166,11 +1117,7 @@ static void extent_insert_committed(struct extent_insert_state *s)
        if (s->deleting)
                split.k.k.type = KEY_TYPE_DISCARD;
 
-       if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
-               bch2_cut_subtract_back(s, s->committed,
-                                      bkey_i_to_s(&split.k));
-       else
-               bch2_cut_back(s->committed, &split.k.k);
+       bch2_cut_back(s->committed, &split.k.k);
 
        if (!bkey_cmp(s->committed, iter->pos))
                return;
@@ -1290,7 +1237,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
        switch (overlap) {
        case BCH_EXTENT_OVERLAP_FRONT:
                /* insert overlaps with start of k: */
-               bch2_cut_subtract_front(s, insert->k.p, k);
+               __bch2_cut_front(insert->k.p, k);
                BUG_ON(bkey_deleted(k.k));
                extent_save(l->b, _k, k.k);
                verify_modified_extent(iter, _k);
@@ -1298,7 +1245,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 
        case BCH_EXTENT_OVERLAP_BACK:
                /* insert overlaps with end of k: */
-               bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
+               bch2_cut_back(bkey_start_pos(&insert->k), k.k);
                BUG_ON(bkey_deleted(k.k));
                extent_save(l->b, _k, k.k);
 
@@ -1318,7 +1265,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
                if (!bkey_whiteout(k.k))
                        btree_account_key_drop(l->b, _k);
 
-               bch2_drop_subtract(s, k);
+               k.k->size = 0;
+               k.k->type = KEY_TYPE_DELETED;
 
                if (_k >= btree_bset_last(l->b)->start) {
                        unsigned u64s = _k->u64s;
@@ -1358,14 +1306,11 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
                bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
                BUG_ON(bkey_deleted(&split.k.k));
 
-               bch2_cut_subtract_front(s, insert->k.p, k);
+               __bch2_cut_front(insert->k.p, k);
                BUG_ON(bkey_deleted(k.k));
                extent_save(l->b, _k, k.k);
                verify_modified_extent(iter, _k);
 
-               bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
-                               bkey_start_offset(&split.k.k),
-                               split.k.k.size);
                extent_bset_insert(c, iter, &split.k);
                break;
        }
@@ -1414,8 +1359,6 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
                    !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
                        if (!bkey_whiteout(k.k)) {
                                btree_account_key_drop(l->b, _k);
-                               bch2_subtract_sectors(s, k.s_c,
-                                                     bkey_start_offset(k.k), k.k->size);
                                _k->type = KEY_TYPE_DISCARD;
                                reserve_whiteout(l->b, _k);
                        }
@@ -1505,7 +1448,6 @@ enum btree_insert_ret
 bch2_insert_fixup_extent(struct btree_insert *trans,
                         struct btree_insert_entry *insert)
 {
-       struct bch_fs *c        = trans->c;
        struct btree_iter *iter = insert->iter;
        struct btree *b         = iter->l[0].b;
        struct extent_insert_state s = {
@@ -1530,19 +1472,10 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
         */
        EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
 
-       if (!s.deleting &&
-           !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
-               bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
-                               bkey_start_offset(&insert->k->k),
-                               insert->k->k.size);
-
        __bch2_insert_fixup_extent(&s);
 
        extent_insert_committed(&s);
 
-       bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
-                          gc_pos_btree_node(b));
-
        EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
        EBUG_ON(bkey_cmp(iter->pos, s.committed));