/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bkey_methods.h"
#include "journal_types.h"
#include "six.h"

struct btree_update;

#define MAX_BSETS		3U

struct btree_nr_keys {

	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];

	/* live keys only: */
	u16			packed_keys;
	u16			unpacked_keys;
};

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
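
/*
 * Illustrative sketch (helper names assumed, not part of this header): with
 * the tree in an array starting at index 1, the children of node j sit at
 * 2*j and 2*j + 1 and its parent at j/2, so a root-to-leaf search walks
 * mostly-adjacent array slots:
 *
 *	static inline unsigned bset_tree_child(unsigned j, unsigned right)
 *	{
 *		return (j << 1) | right;
 *	}
 *
 *	static inline unsigned bset_tree_parent(unsigned j)
 *	{
 *		return j >> 1;
 *	}
 */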

struct btree_write {
	struct journal_entry_pin	journal;
	struct closure_waitlist		wait;
};

struct btree_ob_ref {
	u8			nr;
	u8			refs[BCH_REPLICAS_MAX];
};

struct btree_alloc {
	struct btree_ob_ref	ob;
	BKEY_PADDED(k);
};

struct btree {
	/* Hottest entries first */
	struct rhash_head	hash;

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	struct six_lock		lock;

	unsigned long		flags;
	u16			written;
	u8			level;
	u8			btree_id;
	u8			nsets;
	u8			nr_key_bits;

	struct bkey_format	format;

	struct btree_node	*data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;

	u16			whiteout_u64s;
	u16			uncompacted_whiteout_u64s;

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being
	 * written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;
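
	/*
	 * Worked example (illustrative; the btree_update member names are
	 * assumed): when a split of a node under parent @p allocates
	 * children @n1 and @n2, the interior update code in effect does
	 *
	 *	list_add(&as->write_blocked_list, &p->write_blocked);
	 *	n1->will_make_reachable = (unsigned long) as | 1;
	 *	n2->will_make_reachable = (unsigned long) as | 1;
	 *
	 * (a tagged pointer to the pending btree_update): @p can't be
	 * written until @as completes, and @n1/@n2 won't be rewritten
	 * while they're still unreachable on disk.
	 */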

	struct btree_ob_ref	ob;

	/* lru list */
	struct list_head	list;

	struct btree_write	writes[2];

#ifdef CONFIG_BCACHEFS_DEBUG
	bool			*expensive_debug_checks;
#endif
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed;

	/* Number of elements in live + freeable lists */
	unsigned		used;
	unsigned		reserve;

	struct shrinker		shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};
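
/*
 * A minimal sketch of taking the cannibalize lock above (the helper name is
 * assumed, not this header's API): the holder's task_struct is stored so the
 * lock is recursive per task, and losers wait on alloc_wait:
 *
 *	static inline bool btree_cache_cannibalize_trylock(struct btree_cache *bc)
 *	{
 *		struct task_struct *old =
 *			cmpxchg(&bc->alloc_lock, NULL, current);
 *
 *		return !old || old == current;
 *	}
 */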

struct btree_node_iter {
	u8		is_extents;

	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

enum btree_iter_type {
	BTREE_ITER_KEYS,
	BTREE_ITER_SLOTS,
	BTREE_ITER_NODES,
};

#define BTREE_ITER_TYPE			((1 << 2) - 1)

#define BTREE_ITER_INTENT		(1 << 2)
#define BTREE_ITER_PREFETCH		(1 << 3)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS		(1 << 4)
/*
 * indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
 */
#define BTREE_ITER_AT_END_OF_LEAF	(1 << 5)
#define BTREE_ITER_ERROR		(1 << 6)
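
/*
 * The iterator type lives in the low bits (masked by BTREE_ITER_TYPE) and
 * composes with the flag bits by bitwise or - e.g. (illustrative only):
 *
 *	unsigned flags = BTREE_ITER_SLOTS|BTREE_ITER_INTENT|
 *			 BTREE_ITER_IS_EXTENTS;
 */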

enum btree_iter_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_PEEK		= 1,
	BTREE_ITER_NEED_RELOCK		= 2,
	BTREE_ITER_NEED_TRAVERSE	= 3,
};

/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitmask indicating which nodes in @nodes are locked
 * @nodes_intent_locked	- bitmask indicating which locks are intent locks
 */
struct btree_iter {
	struct bch_fs		*c;
	struct bpos		pos;

	u8			flags;
	enum btree_iter_uptodate uptodate:4;
	enum btree_id		btree_id:4;
	unsigned		level:4,
				locks_want:4,
				nodes_locked:4,
				nodes_intent_locked:4;

	struct btree_iter_level {
		struct btree	*b;
		struct btree_node_iter iter;
	} l[BTREE_MAX_DEPTH];

	u32			lock_seq[BTREE_MAX_DEPTH];

	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/*
	 * Circular linked list of linked iterators: linked iterators share
	 * locks (e.g. two linked iterators may have the same node intent
	 * locked, or read and write locked, at the same time), and insertions
	 * through one iterator won't invalidate the other linked iterators.
	 */

	/* Must come last: */
	struct btree_iter	*next;
};
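
/*
 * A sketch of walking the circular list above, starting from a given
 * iterator (illustrative only):
 *
 *	struct btree_iter *linked;
 *
 *	for (linked = iter->next; linked != iter; linked = linked->next)
 *		... // each linked iterator shares locks with iter
 */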

#define BTREE_ITER_MAX		8

struct btree_insert_entry {
	struct btree_iter *iter;
	struct bkey_i	*k;
	unsigned	extra_res;
	/*
	 * true if entire key was inserted - can only be false for
	 * extents:
	 */
	bool		done;
};

struct btree_trans {
	struct bch_fs		*c;

	struct btree_iter	*iters;
	u64			iter_ids[BTREE_ITER_MAX];

	struct btree_insert_entry updates[BTREE_ITER_MAX];

	struct btree_iter	iters_onstack[2];
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }
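
/*
 * For example, BTREE_FLAG(dirty) below generates btree_node_dirty(),
 * set_btree_node_dirty() and clear_btree_node_dirty(), all operating on the
 * BTREE_NODE_dirty bit in b->flags (usage illustrative only):
 *
 *	if (btree_node_dirty(b))
 *		set_btree_node_need_write(b);
 */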

enum btree_flags {
	BTREE_NODE_read_in_flight,
	BTREE_NODE_read_error,
	BTREE_NODE_dirty,
	BTREE_NODE_need_write,
	BTREE_NODE_noevict,
	BTREE_NODE_write_idx,
	BTREE_NODE_accessed,
	BTREE_NODE_write_in_flight,
	BTREE_NODE_just_written,
};

BTREE_FLAG(read_in_flight);
BTREE_FLAG(read_error);
BTREE_FLAG(dirty);
BTREE_FLAG(need_write);
BTREE_FLAG(noevict);
BTREE_FLAG(write_idx);
BTREE_FLAG(accessed);
BTREE_FLAG(write_in_flight);
BTREE_FLAG(just_written);

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}
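
/*
 * b->writes[] is double buffered: one btree_write tracks the write in flight
 * while the other accumulates state (e.g. journal pins) for the next write,
 * with the BTREE_NODE_write_idx bit selecting which is which. Flipping it,
 * e.g. (illustrative):
 *
 *	change_bit(BTREE_NODE_write_idx, &b->flags);
 *
 * swaps the roles of btree_current_write() and btree_prev_write().
 */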

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return (void *) b->data + t->data_offset * sizeof(u64);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	size_t ret = (u64 *) k - (u64 *) b->data - 1;

	EBUG_ON(ret > U16_MAX);
	return ret;
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return (void *) ((u64 *) b->data + k + 1);
}
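
/*
 * The two helpers above are inverses of each other (offsets are in units of
 * u64s from the start of the node, with a bias of 1). Illustrative
 * round-trip invariant:
 *
 *	EBUG_ON(__btree_node_offset_to_key(b,
 *			__btree_node_key_to_offset(b, k)) != k);
 */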

#define btree_bkey_first(_b, _t)	(bset(_b, _t)->start)

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})
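
/*
 * Typical iteration over the keys of one bset, as a sketch (bkey_next() is
 * the usual helper for stepping over a packed key):
 *
 *	struct bkey_packed *k;
 *
 *	for (k = btree_bkey_first(b, t);
 *	     k != btree_bkey_last(b, t);
 *	     k = bkey_next(k))
 *		...
 */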

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_key_to_offset(b, vstruct_last(bset(b, t)));
	/* the EBUG_ON in btree_bkey_last() verifies the new end_offset: */
	btree_bkey_last(b, t);
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = (u64 *) i - (u64 *) b->data;

	EBUG_ON(bset(b, t) != i);

	set_btree_bset_end(b, t);
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

/* Type of keys @b contains: */
static inline enum bkey_type btree_node_type(struct btree *b)
{
	return b->level ? BKEY_TYPE_BTREE : b->btree_id;
}

static inline const struct bkey_ops *btree_node_ops(struct btree *b)
{
	return &bch2_bkey_ops[btree_node_type(b)];
}

static inline bool btree_node_has_ptrs(struct btree *b)
{
	return btree_type_has_ptrs(btree_node_type(b));
}

static inline bool btree_node_is_extents(struct btree *b)
{
	return btree_node_type(b) == BKEY_TYPE_EXTENTS;
}
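
/*
 * E.g. for a leaf of the extents btree (illustrative):
 *
 *	b->level == 0 && b->btree_id == BTREE_ID_EXTENTS
 *
 * btree_node_type(b) returns BKEY_TYPE_EXTENTS, while every interior node
 * of every btree has btree_node_type() == BKEY_TYPE_BTREE.
 */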

struct btree_root {
	struct btree		*b;

	struct btree_update	*as;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
};

/*
 * Optional hook that will be called just prior to a btree node update, when
 * we're holding the write lock and we know what key is about to be overwritten:
 */

struct btree_node_iter;

enum btree_insert_ret {
	BTREE_INSERT_OK,
	/* extent spanned multiple leaf nodes: have to traverse to next node: */
	BTREE_INSERT_NEED_TRAVERSE,
	/* write lock held for too long */
	BTREE_INSERT_NEED_RESCHED,
	/* leaf node needs to be split */
	BTREE_INSERT_BTREE_NODE_FULL,
	BTREE_INSERT_JOURNAL_RES_FULL,
	BTREE_INSERT_ENOSPC,
	BTREE_INSERT_NEED_GC_LOCK,
};

struct extent_insert_hook {
	enum btree_insert_ret
	(*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
	      struct bkey_s_c, const struct bkey_i *);
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
							struct btree *,
							struct btree_node_iter *);

#endif /* _BCACHEFS_BTREE_TYPES_H */