bcachefs: bch_dev_usage_full
author Kent Overstreet <kent.overstreet@linux.dev>
Mon, 31 Mar 2025 20:09:39 +0000 (16:09 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Wed, 2 Apr 2025 14:24:34 +0000 (10:24 -0400)
The fastpaths that need device usage only need bucket counts, not the sector
totals or fragmentation.

Split bch_dev_usage into two versions: the normal one, which now carries just
bucket counts, and bch_dev_usage_full, which keeps the per-data-type sector
and fragmentation totals.

This is also a stack usage improvement, since we have a bch_dev_usage on
the stack in the allocation path.
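
As a rough sketch (layout inferred from the hunks below; field ordering and
comments abridged), the split looks like:

    /* fast-path view: bucket counts only */
    struct bch_dev_usage {
            u64                     buckets[BCH_DATA_NR];
    };

    /* full view: per-data-type bucket/sector/fragmentation totals */
    struct bch_dev_usage_full {
            struct bch_dev_usage_type {
                    u64             buckets;
                    u64             sectors;        /* _compressed_ sectors */
                    u64             fragmented;
            }                       d[BCH_DATA_NR];
    };

Fast paths read the former via bch2_dev_usage_read_fast(); callers that still
need sectors/fragmentation (debug output, the dev_usage ioctls, copygc) switch
to bch2_dev_usage_full_read().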

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_background.h
fs/bcachefs/alloc_foreground.c
fs/bcachefs/bcachefs.h
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/buckets_types.h
fs/bcachefs/chardev.c
fs/bcachefs/movinggc.c

index c556ccaffe89824bdc24578e09cfb3276f433093..34b3d6ac4fbb74a2c5cc492b875f8aa2b3955361 100644 (file)
@@ -321,11 +321,11 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
 {
        u64 want_free = ca->mi.nbuckets >> 7;
        u64 free = max_t(s64, 0,
-                          u.d[BCH_DATA_free].buckets
-                        + u.d[BCH_DATA_need_discard].buckets
+                          u.buckets[BCH_DATA_free]
+                        + u.buckets[BCH_DATA_need_discard]
                         - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
 
-       return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
+       return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
 }
 
 void bch2_dev_do_invalidates(struct bch_dev *);
index d188bb531e2b0d81e169a32561a48e5ec0704a20..7c930ef7738040549c00afe1d23ede99e8fb47ce 100644 (file)
@@ -469,7 +469,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
        prt_printf(&buf, "watermark\t%s\n",     bch2_watermarks[watermark]);
        prt_printf(&buf, "data type\t%s\n",     __bch2_data_types[data_type]);
        prt_printf(&buf, "blocking\t%u\n",      cl != NULL);
-       prt_printf(&buf, "free\t%llu\n",        usage->d[BCH_DATA_free].buckets);
+       prt_printf(&buf, "free\t%llu\n",        usage->buckets[BCH_DATA_free]);
        prt_printf(&buf, "avail\t%llu\n",       dev_buckets_free(ca, *usage, watermark));
        prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
                   bch2_copygc_wait_amount(c),
@@ -524,10 +524,10 @@ again:
        bch2_dev_usage_read_fast(ca, usage);
        avail = dev_buckets_free(ca, *usage, watermark);
 
-       if (usage->d[BCH_DATA_need_discard].buckets > avail)
+       if (usage->buckets[BCH_DATA_need_discard] > avail)
                bch2_dev_do_discards(ca);
 
-       if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+       if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
                bch2_gc_gens_async(c);
 
        if (should_invalidate_buckets(ca, *usage))
@@ -1669,7 +1669,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 {
        struct bch_fs *c = ca->fs;
-       struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+       struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
        unsigned nr[BCH_DATA_NR];
 
        memset(nr, 0, sizeof(nr));
@@ -1692,7 +1692,8 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
        printbuf_tabstop_push(out, 16);
 
        prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
-       prt_printf(out, "buckets to invalidate\t%llu\r\n",      should_invalidate_buckets(ca, stats));
+       prt_printf(out, "buckets to invalidate\t%llu\r\n",
+                  should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
 }
 
 static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
index 21da4167a3ae2f6a6aa1ab1d099757ba9ec3cb00..5d9f208a1bb744a7a53481d99aee2eaf71cff026 100644 (file)
@@ -562,7 +562,8 @@ struct bch_dev {
        unsigned long           *bucket_backpointer_mismatches;
        unsigned long           *bucket_backpointer_empty;
 
-       struct bch_dev_usage __percpu   *usage;
+       struct bch_dev_usage_full __percpu
+                               *usage;
 
        /* Allocator: */
        u64                     alloc_cursor[3];
index a1fc462ea0ded2098fa11f160f433d3b2a253bb1..fea61e60a9eea8980f907be514b58fae1ddb645d 100644 (file)
 #include <linux/preempt.h>
 
 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
+{
+       for (unsigned i = 0; i < BCH_DATA_NR; i++)
+               usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
+}
+
+void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
 {
        memset(usage, 0, sizeof(*usage));
        acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
@@ -75,7 +81,7 @@ bch2_fs_usage_read_short(struct bch_fs *c)
 
 void bch2_dev_usage_to_text(struct printbuf *out,
                            struct bch_dev *ca,
-                           struct bch_dev_usage *usage)
+                           struct bch_dev_usage_full *usage)
 {
        if (out->nr_tabstops < 5) {
                printbuf_tabstops_reset(out);
@@ -1331,7 +1337,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
 
 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-       ca->usage = alloc_percpu(struct bch_dev_usage);
+       ca->usage = alloc_percpu(struct bch_dev_usage_full);
        if (!ca->usage)
                return -BCH_ERR_ENOMEM_usage_init;
 
index c5363256e3638771eba5dc9a4e7689642a3f8037..1c38b165f48b09fbe259466869cacd1ef1140992 100644 (file)
@@ -172,7 +172,16 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
        return ret;
 }
 
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
+void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
+static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
+{
+       struct bch_dev_usage_full ret;
+
+       bch2_dev_usage_full_read_fast(ca, &ret);
+       return ret;
+}
+
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);
 
 static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
 {
@@ -207,7 +216,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca,
                                   enum bch_watermark watermark)
 {
        return max_t(s64, 0,
-                    usage.d[BCH_DATA_free].buckets -
+                    usage.buckets[BCH_DATA_free] -
                     ca->nr_open_buckets -
                     bch2_dev_buckets_reserved(ca, watermark));
 }
@@ -217,10 +226,10 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
                                          enum bch_watermark watermark)
 {
        return max_t(s64, 0,
-                      usage.d[BCH_DATA_free].buckets
-                    + usage.d[BCH_DATA_cached].buckets
-                    + usage.d[BCH_DATA_need_gc_gens].buckets
-                    + usage.d[BCH_DATA_need_discard].buckets
+                      usage.buckets[BCH_DATA_free]
+                    + usage.buckets[BCH_DATA_cached]
+                    + usage.buckets[BCH_DATA_need_gc_gens]
+                    + usage.buckets[BCH_DATA_need_discard]
                     - ca->nr_open_buckets
                     - bch2_dev_buckets_reserved(ca, watermark));
 }
index 900b8680c8b5e47a0422cd3b73662386fc4d743e..0aed2500ade32fe7c9b68325f6fb1eba31629d44 100644 (file)
@@ -54,7 +54,12 @@ struct bucket_gens {
        u8                      b[] __counted_by(nbuckets);
 };
 
+/* Only info on bucket counts: */
 struct bch_dev_usage {
+       u64                     buckets[BCH_DATA_NR];
+};
+
+struct bch_dev_usage_full {
        struct bch_dev_usage_type {
                u64             buckets;
                u64             sectors; /* _compressed_ sectors: */
index c9d1585eec215a85a84b90c42fb3c1b42b332b0d..5891b3a1e61ce23c01b08198f5098a66849d5b22 100644 (file)
@@ -350,8 +350,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
        if (ctx->arg.op == BCH_DATA_OP_scrub) {
                struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
                if (ca) {
-                       struct bch_dev_usage u;
-                       bch2_dev_usage_read_fast(ca, &u);
+                       struct bch_dev_usage_full u;
+                       bch2_dev_usage_full_read_fast(ca, &u);
                        for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
                                if (ctx->arg.scrub.data_types & BIT(i))
                                        e.p.sectors_total += u.d[i].sectors;
@@ -473,7 +473,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
                                 struct bch_ioctl_dev_usage __user *user_arg)
 {
        struct bch_ioctl_dev_usage arg;
-       struct bch_dev_usage src;
+       struct bch_dev_usage_full src;
        struct bch_dev *ca;
        unsigned i;
 
@@ -493,7 +493,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
        if (IS_ERR(ca))
                return PTR_ERR(ca);
 
-       src = bch2_dev_usage_read(ca);
+       src = bch2_dev_usage_full_read(ca);
 
        arg.state               = ca->mi.state;
        arg.bucket_size         = ca->mi.bucket_size;
@@ -514,7 +514,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
                                 struct bch_ioctl_dev_usage_v2 __user *user_arg)
 {
        struct bch_ioctl_dev_usage_v2 arg;
-       struct bch_dev_usage src;
+       struct bch_dev_usage_full src;
        struct bch_dev *ca;
        int ret = 0;
 
@@ -534,7 +534,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
        if (IS_ERR(ca))
                return PTR_ERR(ca);
 
-       src = bch2_dev_usage_read(ca);
+       src = bch2_dev_usage_full_read(ca);
 
        arg.state               = ca->mi.state;
        arg.bucket_size         = ca->mi.bucket_size;
index 5126c870ce5b5afe7e62d783d761a5e8977d0504..159410c50861b7283a6577b618c81925c18f4d85 100644 (file)
@@ -280,7 +280,11 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
        s64 wait = S64_MAX, fragmented_allowed, fragmented;
 
        for_each_rw_member(c, ca) {
-               struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+               struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
+               struct bch_dev_usage usage;
+
+               for (unsigned i = 0; i < BCH_DATA_NR; i++)
+                       usage.buckets[i] = usage_full.d[i].buckets;
 
                fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
                                       ca->mi.bucket_size) >> 1);
@@ -288,7 +292,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 
                for (unsigned i = 0; i < BCH_DATA_NR; i++)
                        if (data_type_movable(i))
-                               fragmented += usage.d[i].fragmented;
+                               fragmented += usage_full.d[i].fragmented;
 
                wait = min(wait, max(0LL, fragmented_allowed - fragmented));
        }