From b2be7c8b731262c5342e9f068b490d61e540ad0d Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sun, 22 Jul 2018 06:10:52 -0400
Subject: [PATCH] bcachefs: kill bucket mark sector count saturation

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/bcachefs.h |  1 -
 fs/bcachefs/btree_gc.c |  3 ---
 fs/bcachefs/buckets.c  | 44 +++++++++-----------------------------------
 fs/bcachefs/buckets.h  |  5 -----
 fs/bcachefs/trace.h    | 25 -------------------------
 5 files changed, 9 insertions(+), 69 deletions(-)

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 0c55cc914907..7c6b1925f67b 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -413,7 +413,6 @@ struct bch_dev {
 	/* last calculated minimum prio */
 	u16			max_last_bucket_io[2];
 
-	atomic_long_t		saturated_count;
 	size_t			inc_gen_needs_gc;
 	size_t			inc_gen_really_needs_gc;
 	u64			allocator_journal_seq_flush;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 00c28a0a4d9d..5053247a6b42 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -570,9 +570,6 @@ void bch2_gc(struct bch_fs *c)
 	bch2_mark_pending_btree_node_frees(c);
 	bch2_mark_allocator_buckets(c);
 
-	for_each_member_device(ca, c, i)
-		atomic_long_set(&ca->saturated_count, 0);
-
 	/* Indicates that gc is no longer in progress: */
 	gc_pos_set(c, gc_phase(GC_PHASE_DONE));
 	c->gc_count++;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 4a910f773953..eec2f6cb4f5b 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -454,17 +454,11 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 	       c->gc_pos.phase == GC_PHASE_DONE);
 }
 
-#define saturated_add(ca, dst, src, max)			\
+#define checked_add(a, b)					\
 do {								\
-	BUG_ON((int) (dst) + (src) < 0);			\
-	if ((dst) == (max))					\
-		;						\
-	else if ((dst) + (src) <= (max))			\
-		dst += (src);					\
-	else {							\
-		dst = (max);					\
-		trace_sectors_saturated(ca);			\
-	}							\
+	unsigned _res = (unsigned) (a) + (b);			\
+	(a) = _res;						\
+	BUG_ON((a) != _res);					\
 } while (0)
 
 void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -489,9 +483,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 
 	g = bucket(ca, b);
 	old = bucket_data_cmpxchg(c, ca, g, new, ({
-		saturated_add(ca, new.dirty_sectors, sectors,
-			      GC_MAX_SECTORS_USED);
-		new.data_type = type;
+		new.data_type	= type;
+		checked_add(new.dirty_sectors, sectors);
 	}));
 
 	rcu_read_unlock();
@@ -525,7 +518,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
 			      u64 journal_seq, unsigned flags)
 {
 	struct bucket_mark old, new;
-	unsigned saturated;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
 	struct bucket *g = PTR_BUCKET(ca, ptr);
 	enum bch_data_type data_type = type == S_META
@@ -560,7 +552,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
 	v = atomic64_read(&g->_mark.v);
 	do {
 		new.v.counter = old.v.counter = v;
-		saturated = 0;
 
 		/*
 		 * Check this after reading bucket mark to guard against
@@ -574,17 +565,10 @@ static void bch2_mark_pointer(struct bch_fs *c,
 			return;
 		}
 
-		if (!ptr->cached &&
-		    new.dirty_sectors == GC_MAX_SECTORS_USED &&
-		    sectors < 0)
-			saturated = -sectors;
-
-		if (ptr->cached)
-			saturated_add(ca, new.cached_sectors, sectors,
-				      GC_MAX_SECTORS_USED);
+		if (!ptr->cached)
+			checked_add(new.dirty_sectors, sectors);
 		else
-			saturated_add(ca, new.dirty_sectors, sectors,
-				      GC_MAX_SECTORS_USED);
+			checked_add(new.cached_sectors, sectors);
 
 		if (!new.dirty_sectors &&
 		    !new.cached_sectors) {
@@ -610,16 +594,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
 
 	BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
 	       bucket_became_unavailable(c, old, new));
-
-	if (saturated &&
-	    atomic_long_add_return(saturated,
-				   &ca->saturated_count) >=
-	    bucket_to_sector(ca, ca->free_inc.size)) {
-		if (c->gc_thread) {
-			trace_gc_sectors_saturated(c);
-			wake_up_process(c->gc_thread);
-		}
-	}
 }
 
 void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index a4ba6d787b0b..d0dc9c8b4f0b 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -115,11 +115,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 
 /* bucket gc marks */
 
-/* The dirty and cached sector counts saturate. If this occurs,
- * reference counting alone will not free the bucket, and a btree
- * GC must be performed. */
-#define GC_MAX_SECTORS_USED ((1U << 15) - 1)
-
 static inline unsigned bucket_sectors_used(struct bucket_mark mark)
 {
 	return mark.dirty_sectors + mark.cached_sectors;
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index d0b99c692063..9730540f7375 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -44,21 +44,6 @@ DECLARE_EVENT_CLASS(bkey,
 		  __entry->offset, __entry->size)
 );
 
-DECLARE_EVENT_CLASS(bch_dev,
-	TP_PROTO(struct bch_dev *ca),
-	TP_ARGS(ca),
-
-	TP_STRUCT__entry(
-		__array(char, uuid, 16 )
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->uuid, ca->uuid.b, 16);
-	),
-
-	TP_printk("%pU", __entry->uuid)
-);
-
 DECLARE_EVENT_CLASS(bch_fs,
 	TP_PROTO(struct bch_fs *c),
 	TP_ARGS(c),
@@ -361,16 +346,6 @@ DEFINE_EVENT(bch_fs, gc_coalesce_end,
 	TP_ARGS(c)
 );
 
-DEFINE_EVENT(bch_dev, sectors_saturated,
-	TP_PROTO(struct bch_dev *ca),
-	TP_ARGS(ca)
-);
-
-DEFINE_EVENT(bch_fs, gc_sectors_saturated,
-	TP_PROTO(struct bch_fs *c),
-	TP_ARGS(c)
-);
-
 DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
 	TP_PROTO(struct bch_fs *c),
 	TP_ARGS(c)
-- 
2.25.1
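
As a companion to the patch, here is a minimal userspace sketch of the checked_add() semantics it introduces, for poking at the behaviour outside the kernel tree. BUG_ON() is mapped to assert(), and struct bucket_mark_demo is a hypothetical stand-in for the 16-bit dirty_sectors/cached_sectors fields of the real struct bucket_mark; this is an illustration under those assumptions, not part of the patch itself.

/*
 * Standalone sketch of checked_add() (not part of the patch).
 * BUG_ON() is mapped to assert(); bucket_mark_demo is a hypothetical
 * stand-in for the 16-bit sector counts in struct bucket_mark.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUG_ON(cond)	assert(!(cond))

/* New semantics: the sum must still fit in the destination field. */
#define checked_add(a, b)					\
do {								\
	unsigned _res = (unsigned) (a) + (b);			\
	(a) = _res;						\
	BUG_ON((a) != _res);					\
} while (0)

struct bucket_mark_demo {
	uint16_t dirty_sectors;
	uint16_t cached_sectors;
};

int main(void)
{
	struct bucket_mark_demo m = { .dirty_sectors = 100 };

	/* 100 + 28 fits in the 16-bit field: behaves as a plain addition. */
	checked_add(m.dirty_sectors, 28);
	printf("dirty_sectors = %u\n", (unsigned) m.dirty_sectors);

	/*
	 * checked_add(m.dirty_sectors, 65500) would truncate the field,
	 * so (a) != _res and the BUG_ON()/assert() fires, whereas the old
	 * saturated_add() silently clamped the count at GC_MAX_SECTORS_USED
	 * and left it to gc to repair.
	 */
	return 0;
}

The design point of the patch is visible here: rather than silently clamping a sector count and leaning on gc to fix up the bucket later, overflow of the field is now treated as a bug and trips BUG_ON() immediately.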