bcachefs: kill bucket mark sector count saturation
author    Kent Overstreet <kent.overstreet@gmail.com>
Sun, 22 Jul 2018 10:10:52 +0000 (06:10 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:08:08 +0000 (17:08 -0400)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_gc.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/trace.h

index 0c55cc91490762900846d0f01065637526425559..7c6b1925f67bff94b1477c3aee9820b57635fec7 100644 (file)
@@ -413,7 +413,6 @@ struct bch_dev {
        /* last calculated minimum prio */
        u16                     max_last_bucket_io[2];
 
-       atomic_long_t           saturated_count;
        size_t                  inc_gen_needs_gc;
        size_t                  inc_gen_really_needs_gc;
        u64                     allocator_journal_seq_flush;
index 00c28a0a4d9d0f0116f2384d29fc1a041cf458a1..5053247a6b4222402f954a19e9d3c40bbe020fae 100644 (file)
@@ -570,9 +570,6 @@ void bch2_gc(struct bch_fs *c)
        bch2_mark_pending_btree_node_frees(c);
        bch2_mark_allocator_buckets(c);
 
-       for_each_member_device(ca, c, i)
-               atomic_long_set(&ca->saturated_count, 0);
-
        /* Indicates that gc is no longer in progress: */
        gc_pos_set(c, gc_phase(GC_PHASE_DONE));
        c->gc_count++;
index 4a910f773953ade6992c0d7a1d95c850c45c205c..eec2f6cb4f5b0897048ccd27381927bc94548bdd 100644 (file)
@@ -454,17 +454,11 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
               c->gc_pos.phase == GC_PHASE_DONE);
 }
 
-#define saturated_add(ca, dst, src, max)                       \
+#define checked_add(a, b)                                      \
 do {                                                           \
-       BUG_ON((int) (dst) + (src) < 0);                        \
-       if ((dst) == (max))                                     \
-               ;                                               \
-       else if ((dst) + (src) <= (max))                        \
-               dst += (src);                                   \
-       else {                                                  \
-               dst = (max);                                    \
-               trace_sectors_saturated(ca);            \
-       }                                                       \
+       unsigned _res = (unsigned) (a) + (b);                   \
+       (a) = _res;                                             \
+       BUG_ON((a) != _res);                                    \
 } while (0)
 
 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -489,9 +483,9 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 
        g = bucket(ca, b);
        old = bucket_data_cmpxchg(c, ca, g, new, ({
-               saturated_add(ca, new.dirty_sectors, sectors,
-                             GC_MAX_SECTORS_USED);
-               new.data_type           = type;
+               new.data_type = type;
+               checked_add(new.dirty_sectors, sectors);
+               new.dirty_sectors += sectors;
        }));
 
        rcu_read_unlock();
@@ -525,7 +519,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
                              u64 journal_seq, unsigned flags)
 {
        struct bucket_mark old, new;
-       unsigned saturated;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
        struct bucket *g = PTR_BUCKET(ca, ptr);
        enum bch_data_type data_type = type == S_META
@@ -560,7 +553,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
        v = atomic64_read(&g->_mark.v);
        do {
                new.v.counter = old.v.counter = v;
-               saturated = 0;
 
                /*
                 * Check this after reading bucket mark to guard against
@@ -574,17 +566,10 @@ static void bch2_mark_pointer(struct bch_fs *c,
                        return;
                }
 
-               if (!ptr->cached &&
-                   new.dirty_sectors == GC_MAX_SECTORS_USED &&
-                   sectors < 0)
-                       saturated = -sectors;
-
-               if (ptr->cached)
-                       saturated_add(ca, new.cached_sectors, sectors,
-                                     GC_MAX_SECTORS_USED);
+               if (!ptr->cached)
+                       checked_add(new.dirty_sectors, sectors);
                else
-                       saturated_add(ca, new.dirty_sectors, sectors,
-                                     GC_MAX_SECTORS_USED);
+                       checked_add(new.cached_sectors, sectors);
 
                if (!new.dirty_sectors &&
                    !new.cached_sectors) {
@@ -610,16 +595,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
 
        BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
               bucket_became_unavailable(c, old, new));
-
-       if (saturated &&
-           atomic_long_add_return(saturated,
-                                  &ca->saturated_count) >=
-           bucket_to_sector(ca, ca->free_inc.size)) {
-               if (c->gc_thread) {
-                       trace_gc_sectors_saturated(c);
-                       wake_up_process(c->gc_thread);
-               }
-       }
 }
 
 void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
index a4ba6d787b0b75ed0ced7098ff36f9fb1d3b25ea..d0dc9c8b4f0bd200c8908d37fa5616d08f9aa607 100644 (file)
@@ -115,11 +115,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 
 /* bucket gc marks */
 
-/* The dirty and cached sector counts saturate. If this occurs,
- * reference counting alone will not free the bucket, and a btree
- * GC must be performed. */
-#define GC_MAX_SECTORS_USED ((1U << 15) - 1)
-
 static inline unsigned bucket_sectors_used(struct bucket_mark mark)
 {
        return mark.dirty_sectors + mark.cached_sectors;
index d0b99c692063279fe8c0c564efd397f10fdce07d..9730540f7375b03582148053a96cf95eed2feaa0 100644 (file)
@@ -44,21 +44,6 @@ DECLARE_EVENT_CLASS(bkey,
                  __entry->offset, __entry->size)
 );
 
-DECLARE_EVENT_CLASS(bch_dev,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca),
-
-       TP_STRUCT__entry(
-               __array(char,           uuid,   16      )
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->uuid, ca->uuid.b, 16);
-       ),
-
-       TP_printk("%pU", __entry->uuid)
-);
-
 DECLARE_EVENT_CLASS(bch_fs,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c),
@@ -361,16 +346,6 @@ DEFINE_EVENT(bch_fs, gc_coalesce_end,
        TP_ARGS(c)
 );
 
-DEFINE_EVENT(bch_dev, sectors_saturated,
-       TP_PROTO(struct bch_dev *ca),
-       TP_ARGS(ca)
-);
-
-DEFINE_EVENT(bch_fs, gc_sectors_saturated,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
-);
-
 DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
        TP_PROTO(struct bch_fs *c),
        TP_ARGS(c)