bcachefs: Erasure coding fixes & refactoring
fs/bcachefs/btree_types.h
index fcd660470e52181950514d11da5cb65fba846707..e51e3c7868de069465d7a6fb1f3c179f91c45e2e 100644
@@ -6,11 +6,13 @@
 #include <linux/rhashtable.h>
 
 #include "bkey_methods.h"
+#include "buckets_types.h"
 #include "journal_types.h"
 #include "six.h"
 
 struct open_bucket;
 struct btree_update;
+struct btree_trans;
 
 #define MAX_BSETS              3U
 
@@ -51,32 +53,27 @@ struct bset_tree {
 
 struct btree_write {
        struct journal_entry_pin        journal;
-       struct closure_waitlist         wait;
 };
 
-struct btree_ob_ref {
-       u8                      nr;
-       u8                      refs[BCH_REPLICAS_MAX];
+struct btree_alloc {
+       struct open_buckets     ob;
+       __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
-struct btree_alloc {
-       struct btree_ob_ref     ob;
-       BKEY_PADDED(k);
+struct btree_bkey_cached_common {
+       struct six_lock         lock;
+       u8                      level;
+       u8                      btree_id;
 };
 
 struct btree {
-       /* Hottest entries first */
-       struct rhash_head       hash;
-
-       /* Key/pointer for this btree node */
-       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+       struct btree_bkey_cached_common c;
 
-       struct six_lock         lock;
+       struct rhash_head       hash;
+       u64                     hash_val;
 
        unsigned long           flags;
        u16                     written;
-       u8                      level;
-       u8                      btree_id;
        u8                      nsets;
        u8                      nr_key_bits;
 
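The new struct btree_bkey_cached_common pulls the lock, level, and btree_id fields out of struct btree into a shared header, which struct bkey_cached (the key cache entry introduced further down) embeds as well. Both structs place it first, as member c, so locking and traversal code can handle either through the common prefix. A minimal sketch of the pattern, using only helpers defined later in this file:

	/*
	 * Sketch: code that used to take a struct btree can take the shared
	 * header instead and work on cached keys too.
	 */
	static inline enum btree_node_type
	common_node_type(const struct btree_bkey_cached_common *b)
	{
		return __btree_node_type(b->level, b->btree_id);
	}
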
@@ -97,8 +94,7 @@ struct btree {
        struct btree_nr_keys    nr;
        u16                     sib_u64s[2];
        u16                     whiteout_u64s;
-       u16                     uncompacted_whiteout_u64s;
-       u8                      page_order;
+       u8                      byte_order;
        u8                      unpack_fn_len;
 
        /*
@@ -127,16 +123,15 @@ struct btree {
         */
        unsigned long           will_make_reachable;
 
-       struct btree_ob_ref     ob;
+       struct open_buckets     ob;
 
        /* lru list */
        struct list_head        list;
 
        struct btree_write      writes[2];
 
-#ifdef CONFIG_BCACHEFS_DEBUG
-       bool                    *expensive_debug_checks;
-#endif
+       /* Key/pointer for this btree node */
+       __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
 struct btree_cache {
@@ -163,6 +158,7 @@ struct btree_cache {
        /* Number of elements in live + freeable lists */
        unsigned                used;
        unsigned                reserve;
+       atomic_t                dirty;
        struct shrinker         shrink;
 
        /*
@@ -176,8 +172,6 @@ struct btree_cache {
 };
 
 struct btree_node_iter {
-       u8              is_extents;
-
        struct btree_node_iter_set {
                u16     k, end;
        } data[MAX_BSETS];
@@ -185,24 +179,48 @@ struct btree_node_iter {
 
 enum btree_iter_type {
        BTREE_ITER_KEYS,
-       BTREE_ITER_SLOTS,
        BTREE_ITER_NODES,
+       BTREE_ITER_CACHED,
 };
 
 #define BTREE_ITER_TYPE                        ((1 << 2) - 1)
 
-#define BTREE_ITER_INTENT              (1 << 2)
-#define BTREE_ITER_PREFETCH            (1 << 3)
 /*
- * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
- * @pos or the first key strictly greater than @pos
+ * Iterate over all possible positions, synthesizing deleted keys for holes:
+ */
+#define BTREE_ITER_SLOTS               (1 << 2)
+/*
+ * Indicates that intent locks should be taken on leaf nodes, because we expect
+ * to be doing updates:
+ */
+#define BTREE_ITER_INTENT              (1 << 3)
+/*
+ * Causes the btree iterator code to prefetch additional btree nodes from disk:
  */
-#define BTREE_ITER_IS_EXTENTS          (1 << 4)
+#define BTREE_ITER_PREFETCH            (1 << 4)
 /*
- * indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
+ * Indicates that this iterator should not be reused until transaction commit,
+ * either because a pending update references it or because the update depends
+ * on that particular key being locked (e.g. by the str_hash code, for hash
+ * table consistency)
  */
-#define BTREE_ITER_AT_END_OF_LEAF      (1 << 5)
-#define BTREE_ITER_ERROR               (1 << 6)
+#define BTREE_ITER_KEEP_UNTIL_COMMIT   (1 << 5)
+/*
+ * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
+ * @pos or the first key strictly greater than @pos
+ */
+#define BTREE_ITER_IS_EXTENTS          (1 << 6)
+#define BTREE_ITER_ERROR               (1 << 7)
+#define BTREE_ITER_SET_POS_AFTER_COMMIT        (1 << 8)
+#define BTREE_ITER_CACHED_NOFILL       (1 << 9)
+#define BTREE_ITER_CACHED_NOCREATE     (1 << 10)
+
+#define BTREE_ITER_USER_FLAGS                          \
+       (BTREE_ITER_SLOTS                               \
+       |BTREE_ITER_INTENT                              \
+       |BTREE_ITER_PREFETCH                            \
+       |BTREE_ITER_CACHED_NOFILL                       \
+       |BTREE_ITER_CACHED_NOCREATE)
 
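BTREE_ITER_SLOTS is now an independent flag rather than an iterator type, so it composes with the other user flags at iterator creation. A rough usage sketch, assuming this era's bch2_trans_get_iter() taking (trans, btree id, pos, flags):

	/*
	 * Sketch: iterate over every position (synthesizing deleted keys
	 * for holes) and take intent locks, since an update will follow:
	 */
	struct btree_iter *iter =
		bch2_trans_get_iter(trans, BTREE_ID_INODES, pos,
				    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
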
 enum btree_iter_uptodate {
        BTREE_ITER_UPTODATE             = 0,
@@ -211,6 +229,14 @@ enum btree_iter_uptodate {
        BTREE_ITER_NEED_TRAVERSE        = 3,
 };
 
+#define BTREE_ITER_NO_NODE_GET_LOCKS   ((struct btree *) 1)
+#define BTREE_ITER_NO_NODE_DROP                ((struct btree *) 2)
+#define BTREE_ITER_NO_NODE_LOCK_ROOT   ((struct btree *) 3)
+#define BTREE_ITER_NO_NODE_UP          ((struct btree *) 4)
+#define BTREE_ITER_NO_NODE_DOWN                ((struct btree *) 5)
+#define BTREE_ITER_NO_NODE_INIT                ((struct btree *) 6)
+#define BTREE_ITER_NO_NODE_ERROR       ((struct btree *) 7)
+
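These BTREE_ITER_NO_NODE_* constants are sentinels stored in iter->l[level].b in place of a real node pointer; each records why traversal left no node at that level. Real struct btree pointers are always far above these small values, so a simple range check tells the two apart. A sketch of that test (hypothetical helper; the threshold of 128 is an assumption):

	/* Sketch: does this level hold an actual node, or only a marker
	 * explaining why there is none? */
	static inline bool level_has_real_node(struct btree_iter *iter,
					       unsigned level)
	{
		return level < BTREE_MAX_DEPTH &&
			(unsigned long) iter->l[level].b >= 128;
	}
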
 /*
  * @pos                        - iterator's current position
  * @level              - current btree depth
@@ -219,13 +245,17 @@ enum btree_iter_uptodate {
  * @nodes_intent_locked        - bitmask indicating which locks are intent locks
  */
 struct btree_iter {
-       struct bch_fs           *c;
+       struct btree_trans      *trans;
        struct bpos             pos;
+       struct bpos             pos_after_commit;
+
+       u16                     flags;
+       u8                      idx;
 
-       u8                      flags;
-       enum btree_iter_uptodate uptodate:4;
        enum btree_id           btree_id:4;
+       enum btree_iter_uptodate uptodate:4;
        unsigned                level:4,
+                               min_depth:4,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;
@@ -233,59 +263,132 @@ struct btree_iter {
        struct btree_iter_level {
                struct btree    *b;
                struct btree_node_iter iter;
+               u32             lock_seq;
        }                       l[BTREE_MAX_DEPTH];
 
-       u32                     lock_seq[BTREE_MAX_DEPTH];
-
        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;
+       unsigned long           ip_allocated;
+};
 
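lock_seq moved out of a separate per-iterator array and into struct btree_iter_level, next to the node pointer it validates. Six locks carry a sequence number that advances whenever the node is write locked, so an iterator that dropped its locks can check whether the node changed underneath it and relock without a full retraversal. A sketch, assuming six.h exposes a relock-by-sequence operation (six_relock_type or similar):

	/* Sketch (hypothetical helper): re-take a read lock only if the
	 * node hasn't been modified since lock_seq was recorded: */
	static inline bool trylock_level_read(struct btree_iter *iter,
					      unsigned level)
	{
		struct btree *b = iter->l[level].b;

		return six_relock_type(&b->c.lock, SIX_LOCK_read,
				       iter->l[level].lock_seq);
	}
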
-       /*
-        * Circular linked list of linked iterators: linked iterators share
-        * locks (e.g. two linked iterators may have the same node intent
-        * locked, or read and write locked, at the same time), and insertions
-        * through one iterator won't invalidate the other linked iterators.
-        */
+static inline enum btree_iter_type
+btree_iter_type(const struct btree_iter *iter)
+{
+       return iter->flags & BTREE_ITER_TYPE;
+}
+
+static inline bool btree_iter_is_cached(const struct btree_iter *iter)
+{
+       return btree_iter_type(iter) == BTREE_ITER_CACHED;
+}
+
+static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
+{
+       return iter->l + iter->level;
+}
 
-       /* Must come last: */
-       struct btree_iter       *next;
+struct btree_key_cache {
+       struct mutex            lock;
+       struct rhashtable       table;
+       bool                    table_init_done;
+       struct list_head        freed;
+       struct list_head        clean;
+       struct list_head        dirty;
+       struct shrinker         shrink;
+
+       size_t                  nr_freed;
+       size_t                  nr_keys;
+       size_t                  nr_dirty;
 };
 
-#define BTREE_ITER_MAX         8
+struct bkey_cached_key {
+       u32                     btree_id;
+       struct bpos             pos;
+} __attribute__((packed, aligned(4)));
+
+#define BKEY_CACHED_ACCESSED           0
+#define BKEY_CACHED_DIRTY              1
+
+struct bkey_cached {
+       struct btree_bkey_cached_common c;
+
+       unsigned long           flags;
+       u8                      u64s;
+       bool                    valid;
+       u32                     btree_trans_barrier_seq;
+       struct bkey_cached_key  key;
+
+       struct rhash_head       hash;
+       struct list_head        list;
+
+       struct journal_preres   res;
+       struct journal_entry_pin journal;
+
+       struct bkey_i           *k;
+};
 
 struct btree_insert_entry {
-       struct btree_iter *iter;
-       struct bkey_i   *k;
-       unsigned        extra_res;
-       /*
-        * true if entire key was inserted - can only be false for
-        * extents
-        */
-       bool            done;
+       unsigned                trigger_flags;
+       unsigned                trans_triggers_run:1;
+       struct bkey_i           *k;
+       struct btree_iter       *iter;
 };
 
+#ifndef CONFIG_LOCKDEP
+#define BTREE_ITER_MAX         64
+#else
+#define BTREE_ITER_MAX         32
+#endif
+
 struct btree_trans {
        struct bch_fs           *c;
-       size_t                  nr_restarts;
+#ifdef CONFIG_BCACHEFS_DEBUG
+       struct list_head        list;
+       struct btree            *locking;
+       unsigned                locking_iter_idx;
+       struct bpos             locking_pos;
+       u8                      locking_btree_id;
+       u8                      locking_level;
+       pid_t                   pid;
+#endif
+       unsigned long           ip;
+       int                     srcu_idx;
 
-       u8                      nr_iters;
-       u8                      iters_live;
-       u8                      iters_linked;
        u8                      nr_updates;
+       u8                      nr_updates2;
+       unsigned                used_mempool:1;
+       unsigned                error:1;
+       unsigned                nounlock:1;
+       unsigned                in_traverse_all:1;
+
+       u64                     iters_linked;
+       u64                     iters_live;
+       u64                     iters_touched;
 
        unsigned                mem_top;
        unsigned                mem_bytes;
        void                    *mem;
 
        struct btree_iter       *iters;
-       u64                     iter_ids[BTREE_ITER_MAX];
-
-       struct btree_insert_entry updates[BTREE_ITER_MAX];
-
-       struct btree_iter       iters_onstack[2];
+       struct btree_insert_entry *updates;
+       struct btree_insert_entry *updates2;
+
+       /* update path: */
+       struct jset_entry       *extra_journal_entries;
+       unsigned                extra_journal_entry_u64s;
+       struct journal_entry_pin *journal_pin;
+
+       struct journal_res      journal_res;
+       struct journal_preres   journal_preres;
+       u64                     *journal_seq;
+       struct disk_reservation *disk_res;
+       unsigned                flags;
+       unsigned                journal_u64s;
+       unsigned                journal_preres_u64s;
+       struct replicas_delta_list *fs_usage_deltas;
 };
 
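struct btree_trans now owns the iterator and update arrays outright (iters_linked/iters_live/iters_touched are 64-bit bitmaps over the BTREE_ITER_MAX slots) along with everything the commit path needs: journal reservation and pre-reservation, disk reservation, and accumulated replicas deltas. A minimal lifecycle sketch, assuming this era's bch2_trans_* helpers and with error handling elided:

	/* Sketch: look up a position, queue an update, commit. */
	static int example_update(struct bch_fs *c, struct bpos pos,
				  struct bkey_i *new)
	{
		struct btree_trans trans;
		struct btree_iter *iter;
		int ret;

		bch2_trans_init(&trans, c, 0, 0);

		iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, pos,
					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
		bch2_trans_update(&trans, iter, new, 0);

		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
		bch2_trans_exit(&trans);
		return ret;
	}
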
 #define BTREE_FLAG(flag)                                               \
@@ -310,11 +413,13 @@ enum btree_flags {
        BTREE_NODE_just_written,
        BTREE_NODE_dying,
        BTREE_NODE_fake,
+       BTREE_NODE_old_extent_overwrite,
+       BTREE_NODE_need_rewrite,
+       BTREE_NODE_never_write,
 };
 
 BTREE_FLAG(read_in_flight);
 BTREE_FLAG(read_error);
-BTREE_FLAG(dirty);
 BTREE_FLAG(need_write);
 BTREE_FLAG(noevict);
 BTREE_FLAG(write_idx);
@@ -323,6 +428,9 @@ BTREE_FLAG(write_in_flight);
 BTREE_FLAG(just_written);
 BTREE_FLAG(dying);
 BTREE_FLAG(fake);
+BTREE_FLAG(old_extent_overwrite);
+BTREE_FLAG(need_rewrite);
+BTREE_FLAG(never_write);
 
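Each BTREE_FLAG() line generates three accessors over b->flags via the BTREE_FLAG(flag) macro shown in the context above. Its body isn't visible in this diff, but it follows the conventional test/set/clear pattern; roughly, BTREE_FLAG(need_rewrite) expands to:

	static inline bool btree_node_need_rewrite(struct btree *b)
	{
		return test_bit(BTREE_NODE_need_rewrite, &b->flags);
	}

	static inline void set_btree_node_need_rewrite(struct btree *b)
	{
		set_bit(BTREE_NODE_need_rewrite, &b->flags);
	}

	static inline void clear_btree_node_need_rewrite(struct btree *b)
	{
		clear_bit(BTREE_NODE_need_rewrite, &b->flags);
	}
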
 static inline struct btree_write *btree_current_write(struct btree *b)
 {
@@ -340,10 +448,38 @@ static inline struct bset_tree *bset_tree_last(struct btree *b)
        return b->set + b->nsets - 1;
 }
 
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+       return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+       u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+       EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+       return ret;
+}
+
 static inline struct bset *bset(const struct btree *b,
                                const struct bset_tree *t)
 {
-       return (void *) b->data + t->data_offset * sizeof(u64);
+       return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+       t->end_offset =
+               __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+                                 const struct bset *i)
+{
+       t->data_offset = __btree_node_ptr_to_offset(b, i);
+       set_btree_bset_end(b, t);
 }
 
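Offsets in these helpers are in units of u64s, counted from one u64 past the start of b->data (hence the +1). The two conversions are exact inverses, which the EBUG_ON enforces in debug builds. A worked sketch of the invariants:

	/* Sketch: byte arithmetic and round-trip for the offset helpers. */
	static void offset_example(struct btree *b)
	{
		u16 off = 10;
		void *p = __btree_node_offset_to_ptr(b, off);

		/* p is (1 + 10) * sizeof(u64) == 88 bytes past b->data: */
		BUG_ON(p != (void *) b->data + 88);

		/* ...and converting back recovers the offset: */
		BUG_ON(__btree_node_ptr_to_offset(b, p) != off);
	}
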
 static inline struct bset *btree_bset_first(struct btree *b)
@@ -359,16 +495,13 @@ static inline struct bset *btree_bset_last(struct btree *b)
 static inline u16
 __btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
 {
-       size_t ret = (u64 *) k - (u64 *) b->data - 1;
-
-       EBUG_ON(ret > U16_MAX);
-       return ret;
+       return __btree_node_ptr_to_offset(b, k);
 }
 
 static inline struct bkey_packed *
 __btree_node_offset_to_key(const struct btree *b, u16 k)
 {
-       return (void *) ((u64 *) b->data + k + 1);
+       return __btree_node_offset_to_ptr(b, k);
 }
 
 static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
@@ -376,7 +509,13 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
        return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
 }
 
-#define btree_bkey_first(_b, _t)       (bset(_b, _t)->start)
+#define btree_bkey_first(_b, _t)                                       \
+({                                                                     \
+       EBUG_ON(bset(_b, _t)->start !=                                  \
+               __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+                                                                       \
+       bset(_b, _t)->start;                                            \
+})
 
 #define btree_bkey_last(_b, _t)                                                \
 ({                                                                     \
@@ -386,21 +525,15 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
        __btree_node_offset_to_key(_b, (_t)->end_offset);               \
 })
 
-static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+static inline unsigned bset_u64s(struct bset_tree *t)
 {
-       t->end_offset =
-               __btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
-       btree_bkey_last(b, t);
+       return t->end_offset - t->data_offset -
+               sizeof(struct bset) / sizeof(u64);
 }
 
-static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
-                                 const struct bset *i)
+static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
 {
-       t->data_offset = (u64 *) i - (u64 *) b->data;
-
-       EBUG_ON(bset(b, t) != i);
-
-       set_btree_bset_end(b, t);
+       return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
 }
 
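bset_u64s() is pure offset arithmetic: end_offset and data_offset are both u64-granularity offsets into the node, so their difference minus the bset header size yields the u64s of key data; bset_dead_u64s() subtracts the live-key count from b->nr to get the space held by overwritten (dead) keys. A worked sketch with assumed numbers:

	/* Sketch: a bset whose keys span 100 u64s, 60 of them live. */
	static void bset_size_example(struct btree *b, struct bset_tree *t)
	{
		/* given: t->end_offset - t->data_offset ==
		 *        sizeof(struct bset) / sizeof(u64) + 100 */
		BUG_ON(bset_u64s(t) != 100);

		/* given: b->nr.bset_u64s[t - b->set] == 60 */
		BUG_ON(bset_dead_u64s(b, t) != 40);
	}
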
 static inline unsigned bset_byte_offset(struct btree *b, void *i)
@@ -408,36 +541,100 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
        return i - (void *) b->data;
 }
 
-/* Type of keys @b contains: */
-static inline enum bkey_type btree_node_type(struct btree *b)
+enum btree_node_type {
+#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
+       BCH_BTREE_IDS()
+#undef x
+       BKEY_TYPE_BTREE,
+};
+
+/* Type of a key in btree @id at level @level: */
+static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
 {
-       return b->level ? BKEY_TYPE_BTREE : b->btree_id;
+       return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
 }
 
-static inline const struct bkey_ops *btree_node_ops(struct btree *b)
+/* Type of keys @b contains: */
+static inline enum btree_node_type btree_node_type(struct btree *b)
 {
-       return &bch2_bkey_ops[btree_node_type(b)];
+       return __btree_node_type(b->c.level, b->c.btree_id);
 }
 
-static inline bool btree_node_has_ptrs(struct btree *b)
+static inline bool btree_node_type_is_extents(enum btree_node_type type)
 {
-       return btree_type_has_ptrs(btree_node_type(b));
+       switch (type) {
+       case BKEY_TYPE_EXTENTS:
+       case BKEY_TYPE_REFLINK:
+               return true;
+       default:
+               return false;
+       }
 }
 
 static inline bool btree_node_is_extents(struct btree *b)
 {
-       return btree_node_type(b) == BKEY_TYPE_EXTENTS;
+       return btree_node_type_is_extents(btree_node_type(b));
+}
+
+static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
+{
+       return __btree_node_type(iter->level, iter->btree_id);
+}
+
+static inline bool btree_iter_is_extents(struct btree_iter *iter)
+{
+       return btree_node_type_is_extents(btree_iter_key_type(iter));
+}
+
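__btree_node_type() encodes the rule that only leaf nodes contain a btree's own key type; every interior node contains btree pointers. Concretely (a sketch using the extents btree as the example id):

	/* Sketch: leaf vs interior key types. */
	static void node_type_example(void)
	{
		/* a leaf (level 0) of the extents btree holds extents: */
		BUG_ON(__btree_node_type(0, BTREE_ID_EXTENTS) != BKEY_TYPE_EXTENTS);

		/* interior nodes (level > 0) always hold btree pointers: */
		BUG_ON(__btree_node_type(1, BTREE_ID_EXTENTS) != BKEY_TYPE_BTREE);
	}
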
+#define BTREE_NODE_TYPE_HAS_TRIGGERS                   \
+       ((1U << BKEY_TYPE_EXTENTS)|                     \
+        (1U << BKEY_TYPE_ALLOC)|                       \
+        (1U << BKEY_TYPE_INODES)|                      \
+        (1U << BKEY_TYPE_REFLINK)|                     \
+        (1U << BKEY_TYPE_EC)|                          \
+        (1U << BKEY_TYPE_BTREE))
+
+#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS             \
+       ((1U << BKEY_TYPE_EXTENTS)|                     \
+        (1U << BKEY_TYPE_INODES)|                      \
+        (1U << BKEY_TYPE_EC)|                          \
+        (1U << BKEY_TYPE_REFLINK))
+
+enum btree_trigger_flags {
+       __BTREE_TRIGGER_NORUN,          /* Don't run triggers at all */
+
+       __BTREE_TRIGGER_INSERT,
+       __BTREE_TRIGGER_OVERWRITE,
+       __BTREE_TRIGGER_OVERWRITE_SPLIT,
+
+       __BTREE_TRIGGER_GC,
+       __BTREE_TRIGGER_BUCKET_INVALIDATE,
+       __BTREE_TRIGGER_NOATOMIC,
+};
+
+#define BTREE_TRIGGER_NORUN            (1U << __BTREE_TRIGGER_NORUN)
+
+#define BTREE_TRIGGER_INSERT           (1U << __BTREE_TRIGGER_INSERT)
+#define BTREE_TRIGGER_OVERWRITE                (1U << __BTREE_TRIGGER_OVERWRITE)
+#define BTREE_TRIGGER_OVERWRITE_SPLIT  (1U << __BTREE_TRIGGER_OVERWRITE_SPLIT)
+
+#define BTREE_TRIGGER_GC               (1U << __BTREE_TRIGGER_GC)
+#define BTREE_TRIGGER_BUCKET_INVALIDATE        (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
+#define BTREE_TRIGGER_NOATOMIC         (1U << __BTREE_TRIGGER_NOATOMIC)
+
+static inline bool btree_node_type_needs_gc(enum btree_node_type type)
+{
+       return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
 }
 
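The enum gives bit positions and the #defines turn them into masks, so trigger flags compose with |, and btree_node_type_needs_gc() reduces to a single mask test. A sketch of how a trigger might consume the flags (hypothetical function, not part of this patch):

	/* Sketch: skip when asked not to run; otherwise branch on
	 * whether we're accounting a new key or releasing an old one. */
	static int example_trigger(struct bkey_s_c old, struct bkey_s_c new,
				   unsigned flags)
	{
		if (flags & BTREE_TRIGGER_NORUN)
			return 0;

		if (flags & BTREE_TRIGGER_INSERT) {
			/* account references taken by @new */
		}

		if (flags & (BTREE_TRIGGER_OVERWRITE|
			     BTREE_TRIGGER_OVERWRITE_SPLIT)) {
			/* drop references held by @old */
		}

		return 0;
	}
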
 struct btree_root {
        struct btree            *b;
 
-       struct btree_update     *as;
-
        /* On disk root - see async splits: */
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8                      level;
        u8                      alive;
+       s8                      error;
 };
 
 /*
@@ -445,25 +642,14 @@ struct btree_root {
  * we're holding the write lock and we know what key is about to be overwritten:
  */
 
-struct btree_iter;
-struct btree_node_iter;
-
 enum btree_insert_ret {
        BTREE_INSERT_OK,
-       /* extent spanned multiple leaf nodes: have to traverse to next node: */
-       BTREE_INSERT_NEED_TRAVERSE,
-       /* write lock held for too long */
        /* leaf node needs to be split */
        BTREE_INSERT_BTREE_NODE_FULL,
-       BTREE_INSERT_JOURNAL_RES_FULL,
        BTREE_INSERT_ENOSPC,
-       BTREE_INSERT_NEED_GC_LOCK,
-};
-
-struct extent_insert_hook {
-       enum btree_insert_ret
-       (*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
-             struct bkey_s_c, const struct bkey_i *);
+       BTREE_INSERT_NEED_MARK_REPLICAS,
+       BTREE_INSERT_NEED_JOURNAL_RES,
+       BTREE_INSERT_NEED_JOURNAL_RECLAIM,
 };
 
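Each remaining code names the resource the update path must acquire (after dropping locks) before the insert can be retried. A dispatch sketch with the recovery actions as comments (shape only, not the actual commit-path code):

	/* Sketch: mapping btree_insert_ret codes to recovery actions. */
	static void handle_insert_err(enum btree_insert_ret ret)
	{
		switch (ret) {
		case BTREE_INSERT_BTREE_NODE_FULL:
			/* split the leaf node, then retry */
			break;
		case BTREE_INSERT_NEED_MARK_REPLICAS:
			/* add the replicas entry to the superblock, then retry */
			break;
		case BTREE_INSERT_NEED_JOURNAL_RES:
			/* wait for journal reservation space, then retry */
			break;
		case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
			/* kick journal reclaim to free space, then retry */
			break;
		default:
			break;
		}
	}
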
 enum btree_gc_coalesce_fail_reason {