1 // SPDX-License-Identifier: GPL-2.0
3 * Code for working with individual keys, and sorted sets of keys within a
6 * Copyright 2012 Google, Inc.
10 #include "btree_cache.h"
12 #include "eytzinger.h"
16 #include <linux/unaligned.h>
17 #include <linux/console.h>
18 #include <linux/random.h>
19 #include <linux/prefetch.h>
21 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
24 static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
26 unsigned n = ARRAY_SIZE(iter->data);
28 while (n && __btree_node_iter_set_end(iter, n - 1))
34 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
36 return bch2_bkey_to_bset_inlined(b, k);
40 * There are never duplicate live keys in the btree - but including keys that
41 * have been flagged as deleted (and will be cleaned up later) we _will_ see
44 * Thus the sort order is: usual key comparison first, but for keys that compare
45 * equal the deleted key(s) come first, and the (at most one) live version comes
48 * The main reason for this is insertion: to handle overwrites, we first iterate
49 * over keys that compare equal to our insert key, and then insert immediately
50 * prior to the first key greater than the key we're inserting - our insert
51 * position will be after all keys that compare equal to our insert key, which
52 * by the time we actually do the insert will all be deleted.
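 *
 * A worked example (illustrative, not taken from the code): suppose a bset
 * holds, in order,
 *
 *	A   B(deleted)   B(deleted)   B(live)   C
 *
 * and we insert a new version of B. We iterate past every key comparing equal
 * to B (the overwrite path marks the old live B deleted as it goes), then
 * insert just before C - so the new live B again sorts after all the deleted
 * B's, preserving the ordering described above.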
55 void bch2_dump_bset(struct bch_fs *c, struct btree *b,
56 struct bset *i, unsigned set)
58 struct bkey_packed *_k, *_n;
61 struct printbuf buf = PRINTBUF;
72 printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
73 _k->_data - i->_data);
77 k = bkey_disassemble(b, _k, &uk);
81 bch2_bkey_val_to_text(&buf, c, k);
83 bch2_bkey_to_text(&buf, k.k);
84 printk(KERN_ERR "block %u key %5zu: %s\n", set,
85 _k->_data - i->_data, buf.buf);
87 if (_n == vstruct_last(i))
90 n = bkey_unpack_key(b, _n);
92 if (bpos_lt(n.p, k.k->p)) {
93 printk(KERN_ERR "Key skipped backwards\n");
97 if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
98 printk(KERN_ERR "Duplicate keys\n");
104 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
108 bch2_dump_bset(c, b, bset(b, t), t - b->set);
112 void bch2_dump_btree_node_iter(struct btree *b,
113 struct btree_node_iter *iter)
115 struct btree_node_iter_set *set;
116 struct printbuf buf = PRINTBUF;
118 printk(KERN_ERR "btree node iter with %u/%u sets:\n",
119 __btree_node_iter_used(iter), b->nsets);
121 btree_node_iter_for_each(iter, set) {
122 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
123 struct bset_tree *t = bch2_bkey_to_bset(b, k);
124 struct bkey uk = bkey_unpack_key(b, k);
126 printbuf_reset(&buf);
127 bch2_bkey_to_text(&buf, &uk);
128 printk(KERN_ERR "set %zu key %u: %s\n",
129 t - b->set, set->k, buf.buf);
135 struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
137 struct bkey_packed *k;
138 struct btree_nr_keys nr = {};
141 bset_tree_for_each_key(b, t, k)
142 if (!bkey_deleted(k))
143 btree_keys_account_key_add(&nr, t - b->set, k);
147 void __bch2_verify_btree_nr_keys(struct btree *b)
149 struct btree_nr_keys nr = bch2_btree_node_count_keys(b);
151 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
154 static void __bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
157 struct btree_node_iter iter = *_iter;
158 const struct bkey_packed *k, *n;
160 k = bch2_btree_node_iter_peek_all(&iter, b);
161 __bch2_btree_node_iter_advance(&iter, b);
162 n = bch2_btree_node_iter_peek_all(&iter, b);
164 bkey_unpack_key(b, k);
167 bkey_iter_cmp(b, k, n) > 0) {
168 struct btree_node_iter_set *set;
169 struct bkey ku = bkey_unpack_key(b, k);
170 struct bkey nu = bkey_unpack_key(b, n);
171 struct printbuf buf1 = PRINTBUF;
172 struct printbuf buf2 = PRINTBUF;
174 bch2_dump_btree_node(NULL, b);
175 bch2_bkey_to_text(&buf1, &ku);
176 bch2_bkey_to_text(&buf2, &nu);
177 printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
179 printk(KERN_ERR "iter was:");
181 btree_node_iter_for_each(_iter, set) {
182 struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
183 struct bset_tree *t = bch2_bkey_to_bset(b, k2);
184 printk(" [%zi %zi]", t - b->set,
185 k2->_data - bset(b, t)->_data);
191 void __bch2_btree_node_iter_verify(struct btree_node_iter *iter,
194 struct btree_node_iter_set *set, *s2;
195 struct bkey_packed *k, *p;
197 if (bch2_btree_node_iter_end(iter))
200 /* Verify no duplicates: */
201 btree_node_iter_for_each(iter, set) {
202 BUG_ON(set->k > set->end);
203 btree_node_iter_for_each(iter, s2)
204 BUG_ON(set != s2 && set->end == s2->end);
207 /* Verify that set->end is correct: */
208 btree_node_iter_for_each(iter, set) {
210 if (set->end == t->end_offset) {
211 BUG_ON(set->k < btree_bkey_first_offset(t) ||
212 set->k >= t->end_offset);
220 /* Verify iterator is sorted: */
221 btree_node_iter_for_each(iter, set)
222 BUG_ON(set != iter->data &&
223 btree_node_iter_cmp(b, set[-1], set[0]) > 0);
225 k = bch2_btree_node_iter_peek_all(iter, b);
227 for_each_bset(b, t) {
228 if (iter->data[0].end == t->end_offset)
231 p = bch2_bkey_prev_all(b, t,
232 bch2_btree_node_iter_bset_pos(iter, b, t));
234 BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
238 static void __bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
239 struct bkey_packed *insert, unsigned clobber_u64s)
241 struct bset_tree *t = bch2_bkey_to_bset(b, where);
242 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
243 struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
244 struct printbuf buf1 = PRINTBUF;
245 struct printbuf buf2 = PRINTBUF;
248 bkey_iter_cmp(b, prev, insert) > 0);
251 bkey_iter_cmp(b, prev, insert) > 0) {
252 struct bkey k1 = bkey_unpack_key(b, prev);
253 struct bkey k2 = bkey_unpack_key(b, insert);
255 bch2_dump_btree_node(NULL, b);
256 bch2_bkey_to_text(&buf1, &k1);
257 bch2_bkey_to_text(&buf2, &k2);
259 panic("prev > insert:\n"
266 BUG_ON(next != btree_bkey_last(b, t) &&
267 bkey_iter_cmp(b, insert, next) > 0);
269 if (next != btree_bkey_last(b, t) &&
270 bkey_iter_cmp(b, insert, next) > 0) {
271 struct bkey k1 = bkey_unpack_key(b, insert);
272 struct bkey k2 = bkey_unpack_key(b, next);
274 bch2_dump_btree_node(NULL, b);
275 bch2_bkey_to_text(&buf1, &k1);
276 bch2_bkey_to_text(&buf2, &k2);
278 panic("insert > next:\n"
286 static inline void bch2_verify_insert_pos(struct btree *b,
287 struct bkey_packed *where,
288 struct bkey_packed *insert,
289 unsigned clobber_u64s)
291 if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
292 __bch2_verify_insert_pos(b, where, insert, clobber_u64s);
296 /* Auxiliary search trees */
298 #define BFLOAT_FAILED_UNPACKED U8_MAX
299 #define BFLOAT_FAILED U8_MAX
306 #define BKEY_MANTISSA_BITS 16
310 struct bkey_float f[];
318 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
320 BUG_ON(t->aux_data_offset == U16_MAX);
322 switch (bset_aux_tree_type(t)) {
323 case BSET_NO_AUX_TREE:
324 return t->aux_data_offset;
325 case BSET_RO_AUX_TREE:
326 return t->aux_data_offset +
327 DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
328 case BSET_RW_AUX_TREE:
329 return t->aux_data_offset +
330 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
336 static unsigned bset_aux_tree_buf_start(const struct btree *b,
337 const struct bset_tree *t)
340 ? DIV_ROUND_UP(b->unpack_fn_len, 8)
341 : bset_aux_tree_buf_end(t - 1);
344 static void *__aux_tree_base(const struct btree *b,
345 const struct bset_tree *t)
347 return b->aux_data + t->aux_data_offset * 8;
350 static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
351 const struct bset_tree *t)
353 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
355 return __aux_tree_base(b, t);
358 static struct bkey_float *bkey_float(const struct btree *b,
359 const struct bset_tree *t,
362 return ro_aux_tree_base(b, t)->f + idx;
365 static void __bset_aux_tree_verify(struct btree *b)
367 for_each_bset(b, t) {
368 if (t->aux_data_offset == U16_MAX)
371 BUG_ON(t != b->set &&
372 t[-1].aux_data_offset == U16_MAX);
374 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
375 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
376 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
380 static inline void bset_aux_tree_verify(struct btree *b)
382 if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
383 __bset_aux_tree_verify(b);
386 void bch2_btree_keys_init(struct btree *b)
391 memset(&b->nr, 0, sizeof(b->nr));
393 for (i = 0; i < MAX_BSETS; i++)
394 b->set[i].data_offset = U16_MAX;
396 bch2_bset_set_no_aux_tree(b, b->set);
399 /* Binary tree stuff for auxiliary search trees */
402 * Cacheline/offset <-> bkey pointer arithmetic:
404 * t->tree is a binary search tree in an array; each node corresponds to a key
405 * in one cacheline in t->set (BSET_CACHELINE bytes).
407 * This means we don't have to store the full index of the key that a node in
408 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
409 * then bkey_float->key_offset gives us the offset within that cacheline, in units of 8
412 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
415 * To construct the bfloat for an arbitrary key we need to know the keys that
416 * bound it in the search tree: we have to check which bits those two bounding
417 * keys differ in, since those are the bits we're going to store in
418 * bkey_float->mantissa (see make_bfloat() below).
421 static inline void *bset_cacheline(const struct btree *b,
422 const struct bset_tree *t,
425 return (void *) round_down((unsigned long) btree_bkey_first(b, t),
427 cacheline * BSET_CACHELINE;
430 static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
431 const struct bset_tree *t,
435 return bset_cacheline(b, t, cacheline) + offset * 8;
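
/*
 * Written out flat, the arithmetic above amounts to (illustrative sketch only -
 * the real helpers are bset_cacheline() and cacheline_to_bkey()):
 *
 *	void *cl0 = (void *) round_down((unsigned long) btree_bkey_first(b, t),
 *					BSET_CACHELINE);
 *	struct bkey_packed *k = cl0 + cacheline * BSET_CACHELINE + offset * 8;
 *
 * so the ro aux tree only needs a cacheline index plus a small key_offset to
 * locate any key in the bset.
 */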
438 static unsigned bkey_to_cacheline(const struct btree *b,
439 const struct bset_tree *t,
440 const struct bkey_packed *k)
442 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
445 static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
446 const struct bset_tree *t,
448 const struct bkey_packed *k)
450 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
453 static unsigned bkey_to_cacheline_offset(const struct btree *b,
454 const struct bset_tree *t,
456 const struct bkey_packed *k)
458 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
464 static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
465 const struct bset_tree *t,
468 return cacheline_to_bkey(b, t,
469 __eytzinger1_to_inorder(j, t->size - 1, t->extra),
470 bkey_float(b, t, j)->key_offset);
473 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
474 const struct bset_tree *t)
476 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
478 return __aux_tree_base(b, t);
482 * For the write set - the one we're currently inserting keys into - we don't
483 * maintain a full search tree; we just keep a simple lookup table - the rw aux tree.
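 *
 * Each entry is just the key's offset within the node plus its unpacked
 * position (see rw_aux_tree_set() below), so lookups can binary search on
 * struct bpos without unpacking any keys.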
485 static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
489 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
492 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
493 unsigned j, struct bkey_packed *k)
495 EBUG_ON(k >= btree_bkey_last(b, t));
497 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
498 .offset = __btree_node_key_to_offset(b, k),
499 .k = bkey_unpack_pos(b, k),
503 static void __bch2_bset_verify_rw_aux_tree(struct btree *b, struct bset_tree *t)
505 struct bkey_packed *k = btree_bkey_first(b, t);
508 BUG_ON(bset_has_ro_aux_tree(t));
510 if (!bset_has_rw_aux_tree(t))
514 BUG_ON(rw_aux_to_bkey(b, t, j) != k);
518 if (rw_aux_to_bkey(b, t, j) == k) {
519 BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
520 bkey_unpack_pos(b, k)));
525 BUG_ON(rw_aux_tree(b, t)[j].offset <=
526 rw_aux_tree(b, t)[j - 1].offset);
530 BUG_ON(k >= btree_bkey_last(b, t));
534 static inline void bch2_bset_verify_rw_aux_tree(struct btree *b,
537 if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
538 __bch2_bset_verify_rw_aux_tree(b, t);
541 /* returns idx of first entry >= offset: */
542 static unsigned rw_aux_tree_bsearch(struct btree *b,
546 unsigned bset_offs = offset - btree_bkey_first_offset(t);
547 unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
548 unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
550 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
552 EBUG_ON(idx > t->size);
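	/*
	 * idx starts as a linearly interpolated guess (offset scaled into the
	 * table); the scans below correct it to the first entry whose offset
	 * is >= the requested offset:
	 */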
554 while (idx < t->size &&
555 rw_aux_tree(b, t)[idx].offset < offset)
559 rw_aux_tree(b, t)[idx - 1].offset >= offset)
562 EBUG_ON(idx < t->size &&
563 rw_aux_tree(b, t)[idx].offset < offset);
564 EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
565 EBUG_ON(idx + 1 < t->size &&
566 rw_aux_tree(b, t)[idx].offset ==
567 rw_aux_tree(b, t)[idx + 1].offset);
572 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
573 const struct bkey_float *f)
577 EBUG_ON(!bkey_packed(k));
579 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
582 * In little endian, we're shifting off low bits (and then the bits we
583 * want are at the low end), in big endian we're shifting off high bits
584 * (and then the bits we want are at the high end, so we shift them
587 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
588 v >>= f->exponent & 7;
590 v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
595 static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
597 struct bkey_packed *min_key,
598 struct bkey_packed *max_key)
600 struct bkey_float *f = bkey_float(b, t, j);
601 struct bkey_packed *m = tree_to_bkey(b, t, j);
602 struct bkey_packed *l = is_power_of_2(j)
604 : tree_to_bkey(b, t, j >> ffs(j));
605 struct bkey_packed *r = is_power_of_2(j + 1)
607 : tree_to_bkey(b, t, j >> (ffz(j) + 1));
609 int shift, exponent, high_bit;
612 * for failed bfloats, the lookup code falls back to comparing against
616 if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
618 f->exponent = BFLOAT_FAILED_UNPACKED;
623 * The greatest differing bit of l and r is the first bit we must
624 * include in the bfloat mantissa we're creating in order to do
625 * comparisons - that bit always becomes the high bit of
626 * bfloat->mantissa, and thus the exponent we're calculating here is
627 * the position of what will become the low bit in bfloat->mantissa:
629 * Note that this may be negative - we may be running off the low end
630 * of the key: we handle this later:
632 high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
633 min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
634 exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
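
	/*
	 * Worked example with made-up numbers: if l and r first differ at bit
	 * 40 and BKEY_MANTISSA_BITS is 16, then high_bit = 40 and
	 * exponent = 40 - 15 = 25, i.e. the mantissa will hold key bits 40..25.
	 */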
637 * Then we calculate the actual shift value, from the start of the key
638 * (k->_data), to get the key bits starting at exponent:
640 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
641 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
643 EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
645 shift = high_bit_offset +
650 EBUG_ON(shift < KEY_PACKED_BITS_START);
652 EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
655 mantissa = bkey_mantissa(m, f);
658 * If we've got garbage bits, set them to all 1s - it's legal for the
659 * bfloat to compare larger than the original key, but not smaller:
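	 * (For example, if exponent is -3 the low three mantissa bits have no
	 * corresponding key bits, so the line below forces them to 1s.)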
662 mantissa |= ~(~0U << -exponent);
664 f->mantissa = mantissa;
667 /* bytes remaining - only valid for last bset: */
668 static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
670 bset_aux_tree_verify(b);
672 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
675 static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
677 return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
680 static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
682 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
685 static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
687 struct bkey_packed *k;
690 t->extra = BSET_RW_AUX_TREE_VAL;
691 rw_aux_tree(b, t)[0].offset =
692 __btree_node_key_to_offset(b, btree_bkey_first(b, t));
694 bset_tree_for_each_key(b, t, k) {
695 if (t->size == bset_rw_tree_capacity(b, t))
698 if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
700 rw_aux_tree_set(b, t, t->size++, k);
704 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
706 struct bkey_packed *k = btree_bkey_first(b, t);
707 struct bkey_i min_key, max_key;
708 unsigned cacheline = 1;
710 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
711 bset_ro_tree_capacity(b, t));
715 t->extra = BSET_NO_AUX_TREE_VAL;
719 t->extra = eytzinger1_extra(t->size - 1);
721 /* First we figure out where the first key in each cacheline is */
722 eytzinger1_for_each(j, t->size - 1) {
723 while (bkey_to_cacheline(b, t, k) < cacheline)
726 if (k >= btree_bkey_last(b, t)) {
727 /* XXX: this path sucks */
732 bkey_float(b, t, j)->key_offset =
733 bkey_to_cacheline_offset(b, t, cacheline++, k);
735 EBUG_ON(tree_to_bkey(b, t, j) != k);
738 if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
739 bkey_init(&min_key.k);
740 min_key.k.p = b->data->min_key;
743 if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
744 bkey_init(&max_key.k);
745 max_key.k.p = b->data->max_key;
748 /* Then we build the tree */
749 eytzinger1_for_each(j, t->size - 1)
751 bkey_to_packed(&min_key),
752 bkey_to_packed(&max_key));
755 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
759 for (i = b->set; i != t; i++)
760 BUG_ON(bset_has_rw_aux_tree(i));
762 bch2_bset_set_no_aux_tree(b, t);
764 /* round up to next cacheline: */
765 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
766 SMP_CACHE_BYTES / sizeof(u64));
768 bset_aux_tree_verify(b);
771 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
775 ? bset_has_rw_aux_tree(t)
776 : bset_has_ro_aux_tree(t))
779 bset_alloc_tree(b, t);
781 if (!__bset_tree_capacity(b, t))
785 __build_rw_aux_tree(b, t);
787 __build_ro_aux_tree(b, t);
789 bset_aux_tree_verify(b);
792 void bch2_bset_init_first(struct btree *b, struct bset *i)
798 memset(i, 0, sizeof(*i));
799 get_random_bytes(&i->seq, sizeof(i->seq));
800 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
802 t = &b->set[b->nsets++];
803 set_btree_bset(b, t, i);
806 void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
808 struct bset *i = &bne->keys;
811 BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
812 BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
813 BUG_ON(b->nsets >= MAX_BSETS);
815 memset(i, 0, sizeof(*i));
816 i->seq = btree_bset_first(b)->seq;
817 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
819 t = &b->set[b->nsets++];
820 set_btree_bset(b, t, i);
824 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
825 * immediate predecessor:
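 * (callers such as bch2_bkey_prev_filter() then walk forwards from the key
 * this returns to find the predecessor they actually want)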
827 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
828 struct bkey_packed *k)
830 struct bkey_packed *p;
834 EBUG_ON(k < btree_bkey_first(b, t) ||
835 k > btree_bkey_last(b, t));
837 if (k == btree_bkey_first(b, t))
840 switch (bset_aux_tree_type(t)) {
841 case BSET_NO_AUX_TREE:
842 p = btree_bkey_first(b, t);
844 case BSET_RO_AUX_TREE:
845 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
848 p = j ? tree_to_bkey(b, t,
849 __inorder_to_eytzinger1(j--,
850 t->size - 1, t->extra))
851 : btree_bkey_first(b, t);
854 case BSET_RW_AUX_TREE:
855 offset = __btree_node_key_to_offset(b, k);
856 j = rw_aux_tree_bsearch(b, t, offset);
857 p = j ? rw_aux_to_bkey(b, t, j - 1)
858 : btree_bkey_first(b, t);
865 struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
867 struct bkey_packed *k,
868 unsigned min_key_type)
870 struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
872 while ((p = __bkey_prev(b, t, k)) && !ret) {
873 for (i = p; i != k; i = bkey_p_next(i))
874 if (i->type >= min_key_type)
880 if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) {
881 BUG_ON(ret >= orig_k);
885 : btree_bkey_first(b, t);
888 BUG_ON(i->type >= min_key_type);
896 static void rw_aux_tree_insert_entry(struct btree *b,
900 EBUG_ON(!idx || idx > t->size);
901 struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
902 struct bkey_packed *end = idx < t->size
903 ? rw_aux_to_bkey(b, t, idx)
904 : btree_bkey_last(b, t);
906 if (t->size < bset_rw_tree_capacity(b, t) &&
907 (void *) end - (void *) start > L1_CACHE_BYTES) {
908 struct bkey_packed *k = start;
915 if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
916 memmove(&rw_aux_tree(b, t)[idx + 1],
917 &rw_aux_tree(b, t)[idx],
918 (void *) &rw_aux_tree(b, t)[t->size] -
919 (void *) &rw_aux_tree(b, t)[idx]);
921 rw_aux_tree_set(b, t, idx, k);
928 static void bch2_bset_fix_lookup_table(struct btree *b,
930 struct bkey_packed *_where,
931 unsigned clobber_u64s,
934 int shift = new_u64s - clobber_u64s;
935 unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
937 EBUG_ON(bset_has_ro_aux_tree(t));
939 if (!bset_has_rw_aux_tree(t))
942 if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
943 rw_aux_tree_insert_entry(b, t, t->size);
947 /* returns first entry >= where */
948 idx = rw_aux_tree_bsearch(b, t, where);
950 if (rw_aux_tree(b, t)[idx].offset == where) {
951 if (!idx) { /* never delete first entry */
953 } else if (where < t->end_offset) {
954 rw_aux_tree_set(b, t, idx++, _where);
956 EBUG_ON(where != t->end_offset);
957 rw_aux_tree_insert_entry(b, t, --t->size);
962 EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
964 rw_aux_tree(b, t)[idx].offset + shift ==
965 rw_aux_tree(b, t)[idx - 1].offset) {
966 memmove(&rw_aux_tree(b, t)[idx],
967 &rw_aux_tree(b, t)[idx + 1],
968 (void *) &rw_aux_tree(b, t)[t->size] -
969 (void *) &rw_aux_tree(b, t)[idx + 1]);
973 for (j = idx; j < t->size; j++)
974 rw_aux_tree(b, t)[j].offset += shift;
976 EBUG_ON(idx < t->size &&
977 rw_aux_tree(b, t)[idx].offset ==
978 rw_aux_tree(b, t)[idx - 1].offset);
980 rw_aux_tree_insert_entry(b, t, idx);
983 bch2_bset_verify_rw_aux_tree(b, t);
984 bset_aux_tree_verify(b);
987 void bch2_bset_insert(struct btree *b,
988 struct bkey_packed *where,
989 struct bkey_i *insert,
990 unsigned clobber_u64s)
992 struct bkey_format *f = &b->format;
993 struct bset_tree *t = bset_tree_last(b);
994 struct bkey_packed packed, *src = bkey_to_packed(insert);
996 bch2_bset_verify_rw_aux_tree(b, t);
997 bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
999 if (bch2_bkey_pack_key(&packed, &insert->k, f))
1002 if (!bkey_deleted(&insert->k))
1003 btree_keys_account_key_add(&b->nr, t - b->set, src);
1005 if (src->u64s != clobber_u64s) {
1006 u64 *src_p = (u64 *) where->_data + clobber_u64s;
1007 u64 *dst_p = (u64 *) where->_data + src->u64s;
1009 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
1010 (int) clobber_u64s - src->u64s);
1012 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1013 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
1014 set_btree_bset_end(b, t);
1017 memcpy_u64s_small(where, src,
1018 bkeyp_key_u64s(f, src));
1019 memcpy_u64s(bkeyp_val(f, where), &insert->v,
1020 bkeyp_val_u64s(f, src));
1022 if (src->u64s != clobber_u64s)
1023 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
1025 bch2_verify_btree_nr_keys(b);
1028 void bch2_bset_delete(struct btree *b,
1029 struct bkey_packed *where,
1030 unsigned clobber_u64s)
1032 struct bset_tree *t = bset_tree_last(b);
1033 u64 *src_p = (u64 *) where->_data + clobber_u64s;
1034 u64 *dst_p = where->_data;
1036 bch2_bset_verify_rw_aux_tree(b, t);
1038 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
1040 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1041 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
1042 set_btree_bset_end(b, t);
1044 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
1050 static struct bkey_packed *bset_search_write_set(const struct btree *b,
1051 struct bset_tree *t,
1052 struct bpos *search)
1054 unsigned l = 0, r = t->size;
1056 while (l + 1 != r) {
1057 unsigned m = (l + r) >> 1;
1059 if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
1065 return rw_aux_to_bkey(b, t, l);
1068 static inline void prefetch_four_cachelines(void *p)
1070 #ifdef CONFIG_X86_64
1071 asm("prefetcht0 (-127 + 64 * 0)(%0);"
1072 "prefetcht0 (-127 + 64 * 1)(%0);"
1073 "prefetcht0 (-127 + 64 * 2)(%0);"
1074 "prefetcht0 (-127 + 64 * 3)(%0);"
1078 prefetch(p + L1_CACHE_BYTES * 0);
1079 prefetch(p + L1_CACHE_BYTES * 1);
1080 prefetch(p + L1_CACHE_BYTES * 2);
1081 prefetch(p + L1_CACHE_BYTES * 3);
1085 static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
1086 const struct bkey_float *f)
1088 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1089 unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
1091 return f->exponent > key_bits_start;
1093 unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
1095 return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
1100 static struct bkey_packed *bset_search_tree(const struct btree *b,
1101 const struct bset_tree *t,
1102 const struct bpos *search,
1103 const struct bkey_packed *packed_search)
1105 struct ro_aux_tree *base = ro_aux_tree_base(b, t);
1106 struct bkey_float *f;
1107 struct bkey_packed *k;
1108 unsigned inorder, n = 1, l, r;
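
	/*
	 * Note: in the eytzinger layout, node n's descendants four levels down
	 * start at index n << 4; the prefetch below pulls that block in early
	 * to hide memory latency as we descend.
	 */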
1112 if (likely(n << 4 < t->size))
1113 prefetch(&base->f[n << 4]);
1116 if (unlikely(f->exponent >= BFLOAT_FAILED))
1120 r = bkey_mantissa(packed_search, f);
1122 if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
1125 n = n * 2 + (l < r);
1128 k = tree_to_bkey(b, t, n);
1129 cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
1133 n = n * 2 + (cmp < 0);
1134 } while (n < t->size);
1136 inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
1139 * n would have been the node we recursed to - the low bit tells us if
1140 * we recursed left or recursed right.
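 * (e.g. if the final step went right from node 5, n is 11: n >> 1 recovers
 * node 5, and n & 1 being set records that the last comparison sent us right)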
1142 if (likely(!(n & 1))) {
1144 if (unlikely(!inorder))
1145 return btree_bkey_first(b, t);
1147 f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
1150 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1153 static __always_inline __flatten
1154 struct bkey_packed *__bch2_bset_search(struct btree *b,
1155 struct bset_tree *t,
1156 struct bpos *search,
1157 const struct bkey_packed *lossy_packed_search)
1161 * First, we search for a cacheline, then we do a linear search
1162 * within that cacheline.
1164 * To search for the cacheline, there are three different possibilities:
1165 * * The set is too small to have a search tree, so we just do a linear
1166 * search over the whole set.
1167 * * The set is the one we're currently inserting into; keeping a full
1168 * auxiliary search tree up to date would be too expensive, so we
1169 * use a much simpler lookup table to do a binary search -
1170 * bset_search_write_set().
1171 * * Or we use the auxiliary search tree we constructed earlier -
1172 * bset_search_tree()
1175 switch (bset_aux_tree_type(t)) {
1176 case BSET_NO_AUX_TREE:
1177 return btree_bkey_first(b, t);
1178 case BSET_RW_AUX_TREE:
1179 return bset_search_write_set(b, t, search);
1180 case BSET_RO_AUX_TREE:
1181 return bset_search_tree(b, t, search, lossy_packed_search);
1187 static __always_inline __flatten
1188 struct bkey_packed *bch2_bset_search_linear(struct btree *b,
1189 struct bset_tree *t,
1190 struct bpos *search,
1191 struct bkey_packed *packed_search,
1192 const struct bkey_packed *lossy_packed_search,
1193 struct bkey_packed *m)
1195 if (lossy_packed_search)
1196 while (m != btree_bkey_last(b, t) &&
1197 bkey_iter_cmp_p_or_unp(b, m,
1198 lossy_packed_search, search) < 0)
1202 while (m != btree_bkey_last(b, t) &&
1203 bkey_iter_pos_cmp(b, m, search) < 0)
1206 if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) {
1207 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
1210 bkey_iter_cmp_p_or_unp(b, prev,
1211 packed_search, search) >= 0);
1217 /* Btree node iterator */
1219 static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
1221 const struct bkey_packed *k,
1222 const struct bkey_packed *end)
1225 struct btree_node_iter_set *pos;
1227 btree_node_iter_for_each(iter, pos)
1230 BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
1231 *pos = (struct btree_node_iter_set) {
1232 __btree_node_key_to_offset(b, k),
1233 __btree_node_key_to_offset(b, end)
1238 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
1240 const struct bkey_packed *k,
1241 const struct bkey_packed *end)
1243 __bch2_btree_node_iter_push(iter, b, k, end);
1244 bch2_btree_node_iter_sort(iter, b);
1247 noinline __flatten __cold
1248 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
1249 struct btree *b, struct bpos *search)
1251 struct bkey_packed *k;
1253 trace_bkey_pack_pos_fail(search);
1255 bch2_btree_node_iter_init_from_start(iter, b);
1257 while ((k = bch2_btree_node_iter_peek(iter, b)) &&
1258 bkey_iter_pos_cmp(b, k, search) < 0)
1259 bch2_btree_node_iter_advance(iter, b);
1263 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
1266 * @iter: iterator to initialize
1267 * @b: btree node to search
1268 * @search: search key
1270 * Main entry point to the lookup code for individual btree nodes:
1274 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1275 * keys. This doesn't matter for most code, but it does matter for lookups.
1277 * Consider some adjacent keys that include a run of equal keys:
1280 * If you search for k, the lookup code isn't guaranteed to return you any
1281 * specific k. The lookup code is conceptually doing a binary search, and
1282 * iterating backwards is very expensive, so if the pivot happens to land at the
1283 * last k, that's what you'll get.
1285 * This works out ok, but it's something to be aware of:
1287 * - For non extents, we guarantee that the live key comes last - see
1288 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1289 * see will only be deleted keys you don't care about.
1291 * - For extents, deleted keys sort last (see the comment at the top of this
1292 * file). But when you're searching for extents, you actually want the first
1293 * key strictly greater than your search key - an extent that compares equal
1294 * to the search key is going to have 0 sectors after the search key.
1296 * But this does mean that we can't just search for
1297 * bpos_successor(start_of_range) to get the first extent that overlaps with
1298 * the range we want - if we're unlucky and there's an extent that ends
1299 * exactly where we searched, then there could be a deleted key at the same
1300 * position and we'd get that when we search instead of the preceding extent
1303 * So we've got to search for start_of_range, then after the lookup iterate
1304 * past any extents that compare equal to the position we searched for.
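 *
 * A sketch of that pattern (hypothetical caller code, not from this file):
 *
 *	bch2_btree_node_iter_init(&iter, b, &start_of_range);
 *	while ((k = bch2_btree_node_iter_peek(&iter, b)) &&
 *	       bpos_eq(bkey_unpack_pos(b, k), start_of_range))
 *		bch2_btree_node_iter_advance(&iter, b);
 *
 * leaving the iterator at the first key strictly after the search position.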
1307 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
1308 struct btree *b, struct bpos *search)
1310 struct bkey_packed p, *packed_search = NULL;
1311 struct btree_node_iter_set *pos = iter->data;
1312 struct bkey_packed *k[MAX_BSETS];
1315 EBUG_ON(bpos_lt(*search, b->data->min_key));
1316 EBUG_ON(bpos_gt(*search, b->data->max_key));
1317 bset_aux_tree_verify(b);
1319 memset(iter, 0, sizeof(*iter));
1321 switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
1322 case BKEY_PACK_POS_EXACT:
1325 case BKEY_PACK_POS_SMALLER:
1326 packed_search = NULL;
1328 case BKEY_PACK_POS_FAIL:
1329 btree_node_iter_init_pack_failed(iter, b, search);
1333 for (i = 0; i < b->nsets; i++) {
1334 k[i] = __bch2_bset_search(b, b->set + i, search, &p);
1335 prefetch_four_cachelines(k[i]);
1338 for (i = 0; i < b->nsets; i++) {
1339 struct bset_tree *t = b->set + i;
1340 struct bkey_packed *end = btree_bkey_last(b, t);
1342 k[i] = bch2_bset_search_linear(b, t, search,
1343 packed_search, &p, k[i]);
1345 *pos++ = (struct btree_node_iter_set) {
1346 __btree_node_key_to_offset(b, k[i]),
1347 __btree_node_key_to_offset(b, end)
1351 bch2_btree_node_iter_sort(iter, b);
1354 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
1357 memset(iter, 0, sizeof(*iter));
1360 __bch2_btree_node_iter_push(iter, b,
1361 btree_bkey_first(b, t),
1362 btree_bkey_last(b, t));
1363 bch2_btree_node_iter_sort(iter, b);
1366 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
1368 struct bset_tree *t)
1370 struct btree_node_iter_set *set;
1372 btree_node_iter_for_each(iter, set)
1373 if (set->end == t->end_offset)
1374 return __btree_node_offset_to_key(b, set->k);
1376 return btree_bkey_last(b, t);
1379 static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
1385 if ((ret = (btree_node_iter_cmp(b,
1387 iter->data[first + 1]) > 0)))
1388 swap(iter->data[first], iter->data[first + 1]);
1392 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
1395 /* unrolled bubble sort: */
1397 if (!__btree_node_iter_set_end(iter, 2)) {
1398 btree_node_iter_sort_two(iter, b, 0);
1399 btree_node_iter_sort_two(iter, b, 1);
1402 if (!__btree_node_iter_set_end(iter, 1))
1403 btree_node_iter_sort_two(iter, b, 0);
1406 void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
1407 struct btree_node_iter_set *set)
1409 struct btree_node_iter_set *last =
1410 iter->data + ARRAY_SIZE(iter->data) - 1;
1412 memmove(&set[0], &set[1], (void *) last - (void *) set);
1413 *last = (struct btree_node_iter_set) { 0, 0 };
1416 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1419 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
1421 EBUG_ON(iter->data->k > iter->data->end);
1423 if (unlikely(__btree_node_iter_set_end(iter, 0))) {
1424 /* avoid an expensive memmove call: */
1425 iter->data[0] = iter->data[1];
1426 iter->data[1] = iter->data[2];
1427 iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
1431 if (__btree_node_iter_set_end(iter, 1))
1434 if (!btree_node_iter_sort_two(iter, b, 0))
1437 if (__btree_node_iter_set_end(iter, 2))
1440 btree_node_iter_sort_two(iter, b, 1);
1443 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1446 if (static_branch_unlikely(&bch2_debug_check_bset_lookups)) {
1447 __bch2_btree_node_iter_verify(iter, b);
1448 __bch2_btree_node_iter_next_check(iter, b);
1451 __bch2_btree_node_iter_advance(iter, b);
1457 struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
1460 struct bkey_packed *k, *prev = NULL;
1461 struct btree_node_iter_set *set;
1464 bch2_btree_node_iter_verify(iter, b);
1466 for_each_bset(b, t) {
1467 k = bch2_bkey_prev_all(b, t,
1468 bch2_btree_node_iter_bset_pos(iter, b, t));
1470 (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
1472 end = t->end_offset;
1480 * We're manually memmoving instead of just calling sort() to ensure the
1481 * prev we picked ends up in slot 0 - sort won't necessarily put it
1482 * there because of duplicate deleted keys:
1484 btree_node_iter_for_each(iter, set)
1485 if (set->end == end)
1488 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
1490 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
1492 memmove(&iter->data[1],
1494 (void *) set - (void *) &iter->data[0]);
1496 iter->data[0].k = __btree_node_key_to_offset(b, prev);
1497 iter->data[0].end = end;
1499 bch2_btree_node_iter_verify(iter, b);
1503 struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
1506 struct bkey_packed *prev;
1509 prev = bch2_btree_node_iter_prev_all(iter, b);
1510 } while (prev && bkey_deleted(prev));
1515 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
1519 struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
1521 return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
1526 void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
1528 for_each_bset_c(b, t) {
1529 enum bset_aux_tree_type type = bset_aux_tree_type(t);
1532 stats->sets[type].nr++;
1533 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
1536 if (bset_has_ro_aux_tree(t)) {
1537 stats->floats += t->size - 1;
1539 for (j = 1; j < t->size; j++)
1541 bkey_float(b, t, j)->exponent ==
1547 void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
1548 struct bkey_packed *k)
1550 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1552 unsigned j, inorder;
1554 if (!bset_has_ro_aux_tree(t))
1557 inorder = bkey_to_cacheline(b, t, k);
1558 if (!inorder || inorder >= t->size)
1561 j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
1562 if (k != tree_to_bkey(b, t, j))
1565 switch (bkey_float(b, t, j)->exponent) {
1567 uk = bkey_unpack_key(b, k);
1569 " failed unpacked at depth %u\n"
1572 bch2_bpos_to_text(out, uk.p);
1573 prt_printf(out, "\n");