1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
5 * Code for managing the extent btree and dynamically updating the writeback dirty sector count.
10 #include "bkey_methods.h"
12 #include "btree_update.h"
13 #include "btree_update_interior.h"
18 #include "disk_groups.h"
30 static void sort_key_next(struct btree_node_iter_large *iter,
32 struct btree_node_iter_set *i)
34 i->k += __btree_node_offset_to_key(b, i->k)->u64s;
37 *i = iter->data[--iter->used];
41 * Returns true if l > r - unless l == r, in which case returns true if l is older than r.
44 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
45 * equal in different sets, we have to process them newest to oldest.
47 #define key_sort_cmp(h, l, r) \
50 __btree_node_offset_to_key(b, (l).k), \
51 __btree_node_offset_to_key(b, (r).k)) \
56 static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
59 struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
60 struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
69 key_sort_cmp(iter, r[0], r[1]) >= 0)
73 * key_sort_cmp() ensures that when keys compare equal the older key
74 * comes first; so if l->k compares equal to r->k then l->k is older and
77 return !bkey_cmp_packed(b,
78 __btree_node_offset_to_key(b, l->k),
79 __btree_node_offset_to_key(b, r->k));
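/*
 * Roughly: bch2_key_sort_fix_overlapping() merges the keys from all the bsets
 * via the heap above, dropping whiteouts and - when several bsets contain
 * versions of the same key - everything but the newest version; the surviving
 * keys are copied out to dst in sorted order and accounted in the returned
 * btree_nr_keys.
 */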
82 struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
84 struct btree_node_iter_large *iter)
86 struct bkey_packed *out = dst->start;
87 struct btree_nr_keys nr;
89 memset(&nr, 0, sizeof(nr));
91 heap_resort(iter, key_sort_cmp, NULL);
93 while (!bch2_btree_node_iter_large_end(iter)) {
94 if (!should_drop_next_key(iter, b)) {
95 struct bkey_packed *k =
96 __btree_node_offset_to_key(b, iter->data->k);
99 btree_keys_account_key_add(&nr, 0, out);
100 out = bkey_next(out);
103 sort_key_next(iter, b, iter->data);
104 heap_sift_down(iter, 0, key_sort_cmp, NULL);
107 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
111 /* Common among btree and extent ptrs */
113 const struct bch_extent_ptr *
114 bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
116 const struct bch_extent_ptr *ptr;
118 extent_for_each_ptr(e, ptr)
125 void bch2_extent_drop_device(struct bkey_s_extent e, unsigned dev)
127 struct bch_extent_ptr *ptr;
129 bch2_extent_drop_ptrs(e, ptr, ptr->dev == dev);
132 const struct bch_extent_ptr *
133 bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
135 const struct bch_extent_ptr *ptr;
137 extent_for_each_ptr(e, ptr) {
138 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
141 ca->mi.group - 1 == group)
148 const struct bch_extent_ptr *
149 bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
151 const struct bch_extent_ptr *ptr;
153 extent_for_each_ptr(e, ptr)
154 if (bch2_dev_in_target(c, ptr->dev, target) &&
156 !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
162 unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
164 const struct bch_extent_ptr *ptr;
165 unsigned nr_ptrs = 0;
167 extent_for_each_ptr(e, ptr)
173 unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
175 struct bkey_s_c_extent e;
176 const struct bch_extent_ptr *ptr;
177 unsigned nr_ptrs = 0;
181 case BCH_EXTENT_CACHED:
182 e = bkey_s_c_to_extent(k);
184 extent_for_each_ptr(e, ptr)
185 nr_ptrs += !ptr->cached;
188 case BCH_RESERVATION:
189 nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
196 static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
197 struct extent_ptr_decoded p)
199 unsigned i, durability = 0;
205 ca = bch_dev_bkey_exists(c, p.ptr.dev);
207 if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
208 durability = max_t(unsigned, durability, ca->mi.durability);
210 for (i = 0; i < p.ec_nr; i++) {
211 struct ec_stripe *s =
212 genradix_ptr(&c->ec_stripes, p.idx);
217 durability = max_t(unsigned, durability, s->nr_redundant);
223 unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
225 const union bch_extent_entry *entry;
226 struct extent_ptr_decoded p;
227 unsigned durability = 0;
229 extent_for_each_ptr_decode(e, p, entry)
230 durability += bch2_extent_ptr_durability(c, p);
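/*
 * e.g. two dirty pointers on durability-1 devices give the extent a durability
 * of 2; a pointer that is also part of a stripe with nr_redundant 2 counts as
 * at least 2 on its own, and cached pointers don't add durability.
 */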
235 unsigned bch2_extent_is_compressed(struct bkey_s_c k)
241 case BCH_EXTENT_CACHED: {
242 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
243 const union bch_extent_entry *entry;
244 struct extent_ptr_decoded p;
246 extent_for_each_ptr_decode(e, p, entry)
248 p.crc.compression_type != BCH_COMPRESSION_NONE &&
249 p.crc.compressed_size < p.crc.live_size)
250 ret += p.crc.compressed_size;
257 bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
258 struct bch_extent_ptr m, u64 offset)
260 const union bch_extent_entry *entry;
261 struct extent_ptr_decoded p;
263 extent_for_each_ptr_decode(e, p, entry)
264 if (p.ptr.dev == m.dev &&
265 p.ptr.gen == m.gen &&
266 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
267 (s64) m.offset - offset)
273 static union bch_extent_entry *extent_entry_prev(struct bkey_s_extent e,
274 union bch_extent_entry *entry)
276 union bch_extent_entry *i = e.v->start;
281 while (extent_entry_next(i) != entry)
282 i = extent_entry_next(i);
286 union bch_extent_entry *bch2_extent_drop_ptr(struct bkey_s_extent e,
287 struct bch_extent_ptr *ptr)
289 union bch_extent_entry *dst, *src, *prev;
290 bool drop_crc = true;
292 EBUG_ON(ptr < &e.v->start->ptr ||
293 ptr >= &extent_entry_last(e)->ptr);
294 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
296 src = extent_entry_next(to_entry(ptr));
297 if (src != extent_entry_last(e) &&
298 !extent_entry_is_crc(src))
302 while ((prev = extent_entry_prev(e, dst))) {
303 if (extent_entry_is_ptr(prev))
306 if (extent_entry_is_crc(prev)) {
315 memmove_u64s_down(dst, src,
316 (u64 *) extent_entry_last(e) - (u64 *) src);
317 e.k->u64s -= (u64 *) src - (u64 *) dst;
322 static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
323 struct bch_extent_crc_unpacked n)
325 return !u.compression_type &&
327 u.uncompressed_size > u.live_size &&
328 bch2_csum_type_is_encryption(u.csum_type) ==
329 bch2_csum_type_is_encryption(n.csum_type);
332 bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
333 struct bch_extent_crc_unpacked n)
335 struct bch_extent_crc_unpacked crc;
336 const union bch_extent_entry *i;
341 extent_for_each_crc(e, crc, i)
342 if (can_narrow_crc(crc, n))
349 * We're writing another replica for this extent, so while we've got the data in
350 * memory we'll be computing a new checksum for the currently live data.
352 * If there are other replicas we aren't moving, and they are checksummed but
353 * not compressed, we can modify them to point to only the data that is
354 * currently live (so that readers won't have to bounce) while we've got the checksum we need:
357 bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
358 struct bch_extent_crc_unpacked n)
360 struct bch_extent_crc_unpacked u;
361 struct extent_ptr_decoded p;
362 union bch_extent_entry *i;
365 /* Find a checksum entry that covers only live data: */
367 extent_for_each_crc(extent_i_to_s(e), u, i)
368 if (!u.compression_type &&
370 u.live_size == u.uncompressed_size) {
377 BUG_ON(n.compression_type);
379 BUG_ON(n.live_size != e->k.size);
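/*
 * Narrowing a pointer drops it and re-appends it with the narrowed crc, which
 * rearranges the extent's entries - so restart the scan from the top each
 * time rather than continuing with a stale iterator:
 */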
381 restart_narrow_pointers:
382 extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
383 if (can_narrow_crc(p.crc, n)) {
384 bch2_extent_drop_ptr(extent_i_to_s(e), &i->ptr);
385 p.ptr.offset += p.crc.offset;
387 bch2_extent_ptr_decoded_append(e, &p);
389 goto restart_narrow_pointers;
395 /* returns true if not equal */
396 static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
397 struct bch_extent_crc_unpacked r)
399 return (l.csum_type != r.csum_type ||
400 l.compression_type != r.compression_type ||
401 l.compressed_size != r.compressed_size ||
402 l.uncompressed_size != r.uncompressed_size ||
403 l.offset != r.offset ||
404 l.live_size != r.live_size ||
405 l.nonce != r.nonce ||
406 bch2_crc_cmp(l.csum, r.csum));
409 static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
411 struct bch_extent_ptr *ptr;
413 bch2_extent_drop_ptrs(e, ptr,
415 ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
418 bool bch2_ptr_normalize(struct bch_fs *c, struct btree *b, struct bkey_s k)
420 return bch2_extent_normalize(c, k);
423 void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
427 case BCH_EXTENT_CACHED: {
428 union bch_extent_entry *entry;
429 u64 *d = (u64 *) bkeyp_val(f, k);
432 for (i = 0; i < bkeyp_val_u64s(f, k); i++)
435 for (entry = (union bch_extent_entry *) d;
436 entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
437 entry = extent_entry_next(entry)) {
438 switch (extent_entry_type(entry)) {
439 case BCH_EXTENT_ENTRY_ptr:
441 case BCH_EXTENT_ENTRY_crc32:
442 entry->crc32.csum = swab32(entry->crc32.csum);
444 case BCH_EXTENT_ENTRY_crc64:
445 entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
446 entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
448 case BCH_EXTENT_ENTRY_crc128:
449 entry->crc128.csum.hi = (__force __le64)
450 swab64((__force u64) entry->crc128.csum.hi);
451 entry->crc128.csum.lo = (__force __le64)
452 swab64((__force u64) entry->crc128.csum.lo);
454 case BCH_EXTENT_ENTRY_stripe_ptr:
463 static const char *extent_ptr_invalid(const struct bch_fs *c,
464 struct bkey_s_c_extent e,
465 const struct bch_extent_ptr *ptr,
466 unsigned size_ondisk,
469 const struct bch_extent_ptr *ptr2;
472 if (ptr->dev >= c->sb.nr_devices ||
474 return "pointer to invalid device";
476 ca = bch_dev_bkey_exists(c, ptr->dev);
478 return "pointer to invalid device";
480 extent_for_each_ptr(e, ptr2)
481 if (ptr != ptr2 && ptr->dev == ptr2->dev)
482 return "multiple pointers to same device";
484 if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
485 return "offset past end of device";
487 if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
488 return "offset before first bucket";
490 if (bucket_remainder(ca, ptr->offset) +
491 size_ondisk > ca->mi.bucket_size)
492 return "spans multiple buckets";
497 static void extent_print_ptrs(struct printbuf *out, struct bch_fs *c,
498 struct bkey_s_c_extent e)
500 const union bch_extent_entry *entry;
501 struct bch_extent_crc_unpacked crc;
502 const struct bch_extent_ptr *ptr;
503 const struct bch_extent_stripe_ptr *ec;
507 extent_for_each_entry(e, entry) {
511 switch (__extent_entry_type(entry)) {
512 case BCH_EXTENT_ENTRY_ptr:
513 ptr = entry_to_ptr(entry);
514 ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
515 ? bch_dev_bkey_exists(c, ptr->dev)
518 pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
519 (u64) ptr->offset, ptr->gen,
520 ptr->cached ? " cached" : "",
521 ca && ptr_stale(ca, ptr)
524 case BCH_EXTENT_ENTRY_crc32:
525 case BCH_EXTENT_ENTRY_crc64:
526 case BCH_EXTENT_ENTRY_crc128:
527 crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
529 pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
531 crc.uncompressed_size,
532 crc.offset, crc.nonce,
534 crc.compression_type);
536 case BCH_EXTENT_ENTRY_stripe_ptr:
537 ec = &entry->stripe_ptr;
539 pr_buf(out, "ec: idx %llu block %u",
540 (u64) ec->idx, ec->block);
543 pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
550 if (bkey_extent_is_cached(e.k))
551 pr_buf(out, " cached");
554 static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
557 struct bch_dev_io_failures *i;
559 for (i = f->devs; i < f->devs + f->nr; i++)
566 void bch2_mark_io_failure(struct bch_io_failures *failed,
567 struct extent_ptr_decoded *p)
569 struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
572 BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
574 f = &failed->devs[failed->nr++];
579 } else if (p->idx != f->idx) {
589 * returns true if p1 is better than p2:
591 static inline bool ptr_better(struct bch_fs *c,
592 const struct extent_ptr_decoded p1,
593 const struct extent_ptr_decoded p2)
595 if (likely(!p1.idx && !p2.idx)) {
596 struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
597 struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
599 u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
600 u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
602 /* Pick at random, biased in favor of the faster device: */
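/*
 * e.g. with l1 = 1ms and l2 = 3ms, bch2_rand_range(l1 + l2) exceeds l1 about
 * three times out of four, so the faster device serves roughly 75% of reads:
 */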
604 return bch2_rand_range(l1 + l2) > l1;
607 if (force_reconstruct_read(c))
608 return p1.idx > p2.idx;
610 return p1.idx < p2.idx;
613 static int extent_pick_read_device(struct bch_fs *c,
614 struct bkey_s_c_extent e,
615 struct bch_io_failures *failed,
616 struct extent_ptr_decoded *pick)
618 const union bch_extent_entry *entry;
619 struct extent_ptr_decoded p;
620 struct bch_dev_io_failures *f;
624 extent_for_each_ptr_decode(e, p, entry) {
625 ca = bch_dev_bkey_exists(c, p.ptr.dev);
627 if (p.ptr.cached && ptr_stale(ca, &p.ptr))
630 f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
632 p.idx = f->nr_failed < f->nr_retries
637 !bch2_dev_is_readable(ca))
640 if (force_reconstruct_read(c) &&
644 if (p.idx >= p.ec_nr + 1)
647 if (ret && !ptr_better(c, p, *pick))
659 const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
661 if (bkey_extent_is_cached(k.k))
665 return "nonzero key size";
667 if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
668 return "value too big";
672 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
673 const union bch_extent_entry *entry;
674 const struct bch_extent_ptr *ptr;
677 extent_for_each_entry(e, entry) {
678 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
679 return "invalid extent entry type";
681 if (!extent_entry_is_ptr(entry))
682 return "has non ptr field";
685 extent_for_each_ptr(e, ptr) {
686 reason = extent_ptr_invalid(c, e, ptr,
687 c->opts.btree_node_size,
697 return "invalid value type";
701 void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
704 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
705 const struct bch_extent_ptr *ptr;
709 struct bucket_mark mark;
711 unsigned replicas = 0;
714 extent_for_each_ptr(e, ptr) {
715 ca = bch_dev_bkey_exists(c, ptr->dev);
718 if (!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags))
722 if (ptr_stale(ca, ptr))
726 seq = read_seqcount_begin(&c->gc_pos_lock);
727 mark = ptr_bucket_mark(ca, ptr);
729 bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
730 (mark.data_type != BCH_DATA_BTREE ||
731 mark.dirty_sectors < c->opts.btree_node_size);
732 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
734 err = "inconsistent";
739 if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
740 !bch2_bkey_replicas_marked(c, btree_node_type(b),
742 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
744 "btree key bad (replicas not marked in superblock):\n%s",
751 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b), k);
752 bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
753 err, buf, PTR_BUCKET_NR(ca, ptr),
754 mark.gen, (unsigned) mark.v.counter);
757 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
762 if (bkey_extent_is_data(k.k))
763 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
765 invalid = bch2_btree_ptr_invalid(c, k);
767 pr_buf(out, " invalid: %s", invalid);
770 int bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b,
771 struct bch_io_failures *failed,
772 struct extent_ptr_decoded *pick)
774 return extent_pick_read_device(c, bkey_i_to_s_c_extent(&b->key),
780 static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
784 if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
787 EBUG_ON(bkey_cmp(where, k.k->p) > 0);
789 len = k.k->p.offset - where.offset;
791 BUG_ON(len > k.k->size);
794 * Don't readjust offset if the key size is now 0, because that could
795 * cause offset to point to the next bucket:
798 k.k->type = KEY_TYPE_DELETED;
799 else if (bkey_extent_is_data(k.k)) {
800 struct bkey_s_extent e = bkey_s_to_extent(k);
801 union bch_extent_entry *entry;
802 bool seen_crc = false;
804 extent_for_each_entry(e, entry) {
805 switch (extent_entry_type(entry)) {
806 case BCH_EXTENT_ENTRY_ptr:
808 entry->ptr.offset += e.k->size - len;
810 case BCH_EXTENT_ENTRY_crc32:
811 entry->crc32.offset += e.k->size - len;
813 case BCH_EXTENT_ENTRY_crc64:
814 entry->crc64.offset += e.k->size - len;
816 case BCH_EXTENT_ENTRY_crc128:
817 entry->crc128.offset += e.k->size - len;
819 case BCH_EXTENT_ENTRY_stripe_ptr:
823 if (extent_entry_is_crc(entry))
833 bool bch2_cut_front(struct bpos where, struct bkey_i *k)
835 return __bch2_cut_front(where, bkey_i_to_s(k));
838 bool bch2_cut_back(struct bpos where, struct bkey *k)
842 if (bkey_cmp(where, k->p) >= 0)
845 EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
847 len = where.offset - bkey_start_offset(k);
849 BUG_ON(len > k->size);
855 k->type = KEY_TYPE_DELETED;
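/*
 * e.g. for an extent covering offsets [0, 16), cutting the front at offset 8
 * leaves [8, 16) and bumps each pointer/crc offset by 8, while cutting the
 * back at offset 8 leaves [0, 8); a cut that would leave nothing just marks
 * the key deleted.
 */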
861 * bch2_key_resize - adjust size of @k
863 * bkey_start_offset(k) will be preserved, modifies where the extent ends
865 void bch2_key_resize(struct bkey *k,
868 k->p.offset -= k->size;
869 k->p.offset += new_size;
874 * In extent_sort_fix_overlapping(), insert_fixup_extent(),
875 * extent_merge_inline() - we're modifying keys in place that are packed. To do
876 * that we have to unpack the key, modify the unpacked key - then this
877 * copies/repacks the unpacked to the original as necessary.
879 static void extent_save(struct btree *b, struct bkey_packed *dst,
882 struct bkey_format *f = &b->format;
883 struct bkey_i *dst_unpacked;
885 if ((dst_unpacked = packed_to_bkey(dst)))
886 dst_unpacked->k = *src;
888 BUG_ON(!bch2_bkey_pack_key(dst, src, f));
891 static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
894 struct bkey_format *f = &b->format;
895 struct bkey_i *dst_unpacked;
896 struct bkey_packed tmp;
898 if ((dst_unpacked = packed_to_bkey(dst)))
899 dst_unpacked->k = src->k;
900 else if (bch2_bkey_pack_key(&tmp, &src->k, f))
901 memcpy_u64s(dst, &tmp, f->key_u64s);
905 memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
910 * If keys compare equal, compare by pointer order:
912 * Necessary for sort_fix_overlapping() - if there are multiple keys that
913 * compare equal in different sets, we have to process them newest to oldest.
915 #define extent_sort_cmp(h, l, r) \
917 struct bkey _ul = bkey_unpack_key(b, \
918 __btree_node_offset_to_key(b, (l).k)); \
919 struct bkey _ur = bkey_unpack_key(b, \
920 __btree_node_offset_to_key(b, (r).k)); \
922 bkey_cmp(bkey_start_pos(&_ul), \
923 bkey_start_pos(&_ur)) ?: (r).k - (l).k; \
926 static inline void extent_sort_sift(struct btree_node_iter_large *iter,
927 struct btree *b, size_t i)
929 heap_sift_down(iter, i, extent_sort_cmp, NULL);
932 static inline void extent_sort_next(struct btree_node_iter_large *iter,
934 struct btree_node_iter_set *i)
936 sort_key_next(iter, b, i);
937 heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
940 static void extent_sort_append(struct bch_fs *c,
942 struct btree_nr_keys *nr,
943 struct bkey_packed *start,
944 struct bkey_packed **prev,
945 struct bkey_packed *k)
947 struct bkey_format *f = &b->format;
950 if (bkey_whiteout(k))
953 bch2_bkey_unpack(b, &tmp.k, k);
956 bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
960 bch2_bkey_pack(*prev, (void *) *prev, f);
962 btree_keys_account_key_add(nr, 0, *prev);
963 *prev = bkey_next(*prev);
968 bkey_copy(*prev, &tmp.k);
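/*
 * Example of the overlap handling below: if an older bset contains [0, 16) and
 * a newer bset contains [4, 8), the newer key wins the overlapping range - the
 * older key is split into [0, 4) (emitted via the tmp copy) and trimmed to
 * [8, 16), so the output ends up [0, 4), [4, 8), [8, 16) with no overlaps.
 */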
971 struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
974 struct btree_node_iter_large *iter)
976 struct bkey_format *f = &b->format;
977 struct btree_node_iter_set *_l = iter->data, *_r;
978 struct bkey_packed *prev = NULL, *out, *lk, *rk;
979 struct bkey l_unpacked, r_unpacked;
981 struct btree_nr_keys nr;
983 memset(&nr, 0, sizeof(nr));
985 heap_resort(iter, extent_sort_cmp, NULL);
987 while (!bch2_btree_node_iter_large_end(iter)) {
988 lk = __btree_node_offset_to_key(b, _l->k);
990 if (iter->used == 1) {
991 extent_sort_append(c, b, &nr, dst->start, &prev, lk);
992 extent_sort_next(iter, b, _l);
997 if (iter->used > 2 &&
998 extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
1001 rk = __btree_node_offset_to_key(b, _r->k);
1003 l = __bkey_disassemble(b, lk, &l_unpacked);
1004 r = __bkey_disassemble(b, rk, &r_unpacked);
1006 /* If current key and next key don't overlap, just append */
1007 if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
1008 extent_sort_append(c, b, &nr, dst->start, &prev, lk);
1009 extent_sort_next(iter, b, _l);
1013 /* Skip 0 size keys */
1015 extent_sort_next(iter, b, _r);
1020 * overlap: keep the newer key and trim the older key so they
1021 * don't overlap. comparing pointers tells us which one is
1022 * newer, since the bsets are appended one after the other.
1025 /* can't happen because of comparison func */
1026 BUG_ON(_l->k < _r->k &&
1027 !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
1029 if (_l->k > _r->k) {
1030 /* l wins, trim r */
1031 if (bkey_cmp(l.k->p, r.k->p) >= 0) {
1032 sort_key_next(iter, b, _r);
1034 __bch2_cut_front(l.k->p, r);
1035 extent_save(b, rk, r.k);
1038 extent_sort_sift(iter, b, _r - iter->data);
1039 } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
1043 * r wins, but it overlaps in the middle of l - split l:
1045 bkey_reassemble(&tmp.k, l.s_c);
1046 bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
1048 __bch2_cut_front(r.k->p, l);
1049 extent_save(b, lk, l.k);
1051 extent_sort_sift(iter, b, 0);
1053 extent_sort_append(c, b, &nr, dst->start, &prev,
1054 bkey_to_packed(&tmp.k));
1056 bch2_cut_back(bkey_start_pos(r.k), l.k);
1057 extent_save(b, lk, l.k);
1062 bch2_bkey_pack(prev, (void *) prev, f);
1063 btree_keys_account_key_add(&nr, 0, prev);
1064 out = bkey_next(prev);
1069 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
1073 struct extent_insert_state {
1074 struct btree_insert *trans;
1075 struct btree_insert_entry *insert;
1076 struct bpos committed;
1079 struct bkey_i whiteout;
1080 bool update_journal;
1085 static bool bch2_extent_merge_inline(struct bch_fs *,
1086 struct btree_iter *,
1087 struct bkey_packed *,
1088 struct bkey_packed *,
1091 static void verify_extent_nonoverlapping(struct btree *b,
1092 struct btree_node_iter *_iter,
1093 struct bkey_i *insert)
1095 #ifdef CONFIG_BCACHEFS_DEBUG
1096 struct btree_node_iter iter;
1097 struct bkey_packed *k;
1101 k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_DISCARD);
1103 (uk = bkey_unpack_key(b, k),
1104 bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
1107 k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_DISCARD);
1110 (uk = bkey_unpack_key(b, k),
1111 bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
1114 (uk = bkey_unpack_key(b, k),
1115 bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
1119 bch2_bkey_to_text(&PBUF(buf1), &insert->k);
1120 bch2_bkey_to_text(&PBUF(buf2), &uk);
1122 bch2_dump_btree_node(b);
1123 panic("insert > next :\n"
1133 static void verify_modified_extent(struct btree_iter *iter,
1134 struct bkey_packed *k)
1136 bch2_btree_iter_verify(iter, iter->l[0].b);
1137 bch2_verify_insert_pos(iter->l[0].b, k, k, k->u64s);
1140 static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
1141 struct bkey_i *insert)
1143 struct btree_iter_level *l = &iter->l[0];
1144 struct btree_node_iter node_iter;
1145 struct bkey_packed *k;
1147 BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
1149 EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
1150 verify_extent_nonoverlapping(l->b, &l->iter, insert);
1152 node_iter = l->iter;
1153 k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1154 if (k && !bkey_written(l->b, k) &&
1155 bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
1158 node_iter = l->iter;
1159 k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_DISCARD);
1160 if (k && !bkey_written(l->b, k) &&
1161 bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
1164 k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
1166 bch2_bset_insert(l->b, &l->iter, k, insert, 0);
1167 bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
1168 bch2_btree_iter_verify(iter, l->b);
1171 static void extent_insert_committed(struct extent_insert_state *s)
1173 struct bch_fs *c = s->trans->c;
1174 struct btree_iter *iter = s->insert->iter;
1175 struct bkey_i *insert = s->insert->k;
1176 BKEY_PADDED(k) split;
1178 EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
1179 EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
1181 bkey_copy(&split.k, insert);
1183 split.k.k.type = KEY_TYPE_DISCARD;
1185 bch2_cut_back(s->committed, &split.k.k);
1187 if (!bkey_cmp(s->committed, iter->pos))
1190 bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1192 if (s->update_btree) {
1193 if (debug_check_bkeys(c))
1194 bch2_bkey_debugcheck(c, iter->l[0].b,
1195 bkey_i_to_s_c(&split.k));
1197 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1199 extent_bset_insert(c, iter, &split.k);
1202 if (s->update_journal) {
1203 bkey_copy(&split.k, !s->deleting ? insert : &s->whiteout);
1205 split.k.k.type = KEY_TYPE_DISCARD;
1207 bch2_cut_back(s->committed, &split.k.k);
1209 EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
1211 bch2_btree_journal_key(s->trans, iter, &split.k);
1214 bch2_cut_front(s->committed, insert);
1216 insert->k.needs_whiteout = false;
1219 void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
1221 struct btree *b = iter->l[0].b;
1223 BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1225 bch2_cut_back(b->key.k.p, &k->k);
1227 BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
1230 enum btree_insert_ret
1231 bch2_extent_can_insert(struct btree_insert *trans,
1232 struct btree_insert_entry *insert,
1235 struct btree_iter_level *l = &insert->iter->l[0];
1236 struct btree_node_iter node_iter = l->iter;
1237 enum bch_extent_overlap overlap;
1238 struct bkey_packed *_k;
1239 struct bkey unpacked;
1243 BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
1244 !bch2_extent_is_atomic(&insert->k->k, insert->iter));
1247 * We avoid creating whiteouts whenever possible when deleting, but
1248 * those optimizations mean we may potentially insert two whiteouts
1249 * instead of one (when we overlap with the front of one extent and the
1252 if (bkey_whiteout(&insert->k->k))
1255 _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
1258 return BTREE_INSERT_OK;
1260 k = bkey_disassemble(l->b, _k, &unpacked);
1262 overlap = bch2_extent_overlap(&insert->k->k, k.k);
1264 /* account for having to split existing extent: */
1265 if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1268 if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
1269 (sectors = bch2_extent_is_compressed(k))) {
1270 int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
1272 if (trans->flags & BTREE_INSERT_NOFAIL)
1273 flags |= BCH_DISK_RESERVATION_NOFAIL;
1275 switch (bch2_disk_reservation_add(trans->c,
1281 return BTREE_INSERT_ENOSPC;
1283 return BTREE_INSERT_NEED_GC_LOCK;
1289 return BTREE_INSERT_OK;
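/*
 * Rough sketch of the overlap cases handled below ("i" is the key being
 * inserted, "k" the existing extent):
 *
 *   FRONT:   i  |----|        insert covers the start of k: cut k's front
 *            k     |----|
 *
 *   BACK:    i     |----|     insert covers the end of k: cut k's back
 *            k  |----|
 *
 *   ALL:     i  |--------|    insert covers all of k: k is invalidated
 *            k    |----|
 *
 *   MIDDLE:  i     |--|       insert lands inside k: k is split around it
 *            k  |--------|
 */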
1293 extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
1294 struct bkey_packed *_k, struct bkey_s k,
1295 enum bch_extent_overlap overlap)
1297 struct bch_fs *c = s->trans->c;
1298 struct btree_iter *iter = s->insert->iter;
1299 struct btree_iter_level *l = &iter->l[0];
1302 case BCH_EXTENT_OVERLAP_FRONT:
1303 /* insert overlaps with start of k: */
1304 __bch2_cut_front(insert->k.p, k);
1305 BUG_ON(bkey_deleted(k.k));
1306 extent_save(l->b, _k, k.k);
1307 verify_modified_extent(iter, _k);
1310 case BCH_EXTENT_OVERLAP_BACK:
1311 /* insert overlaps with end of k: */
1312 bch2_cut_back(bkey_start_pos(&insert->k), k.k);
1313 BUG_ON(bkey_deleted(k.k));
1314 extent_save(l->b, _k, k.k);
1317 * As the auxiliary tree is indexed by the end of the
1318 * key and we've just changed the end, update the
1321 bch2_bset_fix_invalidated_key(l->b, _k);
1322 bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1323 _k, _k->u64s, _k->u64s);
1324 verify_modified_extent(iter, _k);
1327 case BCH_EXTENT_OVERLAP_ALL: {
1328 /* The insert key completely covers k, invalidate k */
1329 if (!bkey_whiteout(k.k))
1330 btree_account_key_drop(l->b, _k);
1333 k.k->type = KEY_TYPE_DELETED;
1335 if (_k >= btree_bset_last(l->b)->start) {
1336 unsigned u64s = _k->u64s;
1338 bch2_bset_delete(l->b, _k, _k->u64s);
1339 bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1341 bch2_btree_iter_verify(iter, l->b);
1343 extent_save(l->b, _k, k.k);
1344 bch2_btree_node_iter_fix(iter, l->b, &l->iter,
1345 _k, _k->u64s, _k->u64s);
1346 verify_modified_extent(iter, _k);
1351 case BCH_EXTENT_OVERLAP_MIDDLE: {
1352 BKEY_PADDED(k) split;
1354 * The insert key falls 'in the middle' of k
1355 * The insert key splits k in 3:
1356 * - start only in k, preserve
1357 * - middle common section, invalidate in k
1358 * - end only in k, preserve
1360 * We update the old key to preserve the start,
1361 * insert will be the new common section,
1362 * we manually insert the end that we are preserving.
1364 * modify k _before_ doing the insert (which will move what k points to).
1367 bkey_reassemble(&split.k, k.s_c);
1368 split.k.k.needs_whiteout |= bkey_written(l->b, _k);
1370 bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
1371 BUG_ON(bkey_deleted(&split.k.k));
1373 __bch2_cut_front(insert->k.p, k);
1374 BUG_ON(bkey_deleted(k.k));
1375 extent_save(l->b, _k, k.k);
1376 verify_modified_extent(iter, _k);
1378 extent_bset_insert(c, iter, &split.k);
1384 static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
1386 struct btree_iter *iter = s->insert->iter;
1387 struct btree_iter_level *l = &iter->l[0];
1388 struct bkey_packed *_k;
1389 struct bkey unpacked;
1390 struct bkey_i *insert = s->insert->k;
1392 while (bkey_cmp(s->committed, insert->k.p) < 0 &&
1393 (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
1394 KEY_TYPE_DISCARD))) {
1395 struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
1396 enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
1398 EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
1400 if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
1403 s->committed = bpos_min(s->insert->k->k.p, k.k->p);
1405 if (!bkey_whiteout(k.k))
1406 s->update_journal = true;
1408 if (!s->update_journal) {
1409 bch2_cut_front(s->committed, insert);
1410 bch2_cut_front(s->committed, &s->whiteout);
1411 bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
1416 * When deleting, if possible just do it by switching the type
1417 * of the key we're deleting, instead of creating and inserting a new whiteout:
1422 !bkey_cmp(insert->k.p, k.k->p) &&
1423 !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
1424 if (!bkey_whiteout(k.k)) {
1425 btree_account_key_drop(l->b, _k);
1426 _k->type = KEY_TYPE_DISCARD;
1427 reserve_whiteout(l->b, _k);
1432 if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
1433 insert->k.needs_whiteout = true;
1434 s->update_btree = true;
1437 if (s->update_btree &&
1438 overlap == BCH_EXTENT_OVERLAP_ALL &&
1439 bkey_whiteout(k.k) &&
1440 k.k->needs_whiteout) {
1441 unreserve_whiteout(l->b, _k);
1442 _k->needs_whiteout = false;
1445 extent_squash(s, insert, _k, k, overlap);
1447 if (!s->update_btree)
1448 bch2_cut_front(s->committed, insert);
1450 if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
1451 overlap == BCH_EXTENT_OVERLAP_MIDDLE)
1455 if (bkey_cmp(s->committed, insert->k.p) < 0)
1456 s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
1459 * may have skipped past some deleted extents greater than the insert
1460 * key, before we got to a non-deleted extent and knew we could bail out;
1461 * rewind the iterator a bit if necessary:
1464 struct btree_node_iter node_iter = l->iter;
1466 while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
1467 bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
1468 l->iter = node_iter;
1473 * bch2_insert_fixup_extent - insert a new extent and deal with overlaps
1475 * this may result in not actually doing the insert, or inserting some subset
1476 * of the insert key. For cmpxchg operations this is where that logic lives.
1478 * All subsets of @insert that need to be inserted are inserted using
1479 * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
1480 * returns false, setting @iter->pos for the prefix of @insert that actually got inserted.
1483 * BSET INVARIANTS: this function is responsible for maintaining all the
1484 * invariants for bsets of extents in memory. things get really hairy with 0 size extents:
1489 * bkey_start_pos(bkey_next(k)) >= k
1490 * or bkey_start_offset(bkey_next(k)) >= k->offset
1492 * i.e. strict ordering, no overlapping extents.
1494 * multiple bsets (i.e. full btree node):
1497 * ∀ k, j: k.size != 0 ∧ j.size != 0 →
1498 *   ¬ (k > bkey_start_pos(j) ∧ k < j)
1500 * i.e. no two overlapping keys _of nonzero size_
1502 * We can't realistically maintain this invariant for zero size keys because of
1503 * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
1504 * there may be another 0 size key between them in another bset, and it will
1505 * thus overlap with the merged key.
1507 * In addition, the end of iter->pos indicates how much has been processed.
1508 * If the end of iter->pos is not the same as the end of insert, then
1509 * key insertion needs to continue/be retried.
1511 enum btree_insert_ret
1512 bch2_insert_fixup_extent(struct btree_insert *trans,
1513 struct btree_insert_entry *insert)
1515 struct btree_iter *iter = insert->iter;
1516 struct btree *b = iter->l[0].b;
1517 struct extent_insert_state s = {
1520 .committed = iter->pos,
1522 .whiteout = *insert->k,
1523 .update_journal = !bkey_whiteout(&insert->k->k),
1524 .update_btree = !bkey_whiteout(&insert->k->k),
1525 .deleting = bkey_whiteout(&insert->k->k),
1528 EBUG_ON(iter->level);
1529 EBUG_ON(!insert->k->k.size);
1532 * As we process overlapping extents, we advance @iter->pos both to
1533 * signal to our caller (btree_insert_key()) how much of @insert->k has
1534 * been inserted, and also to keep @iter->pos consistent with
1535 * @insert->k and the node iterator that we're advancing:
1537 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1539 __bch2_insert_fixup_extent(&s);
1541 extent_insert_committed(&s);
1543 EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
1544 EBUG_ON(bkey_cmp(iter->pos, s.committed));
1546 if (insert->k->k.size) {
1547 /* got to the end of this leaf node */
1548 BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
1549 return BTREE_INSERT_NEED_TRAVERSE;
1552 return BTREE_INSERT_OK;
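/*
 * i.e. when only a prefix of the insert fit in this leaf, iter->pos records
 * how far we got and BTREE_INSERT_NEED_TRAVERSE tells the caller to
 * re-traverse the iterator and retry the remainder.
 */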
1555 const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
1557 if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
1558 return "value too big";
1561 return "zero key size";
1563 switch (k.k->type) {
1565 case BCH_EXTENT_CACHED: {
1566 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
1567 const union bch_extent_entry *entry;
1568 struct bch_extent_crc_unpacked crc;
1569 const struct bch_extent_ptr *ptr;
1570 unsigned size_ondisk = e.k->size;
1572 unsigned nonce = UINT_MAX;
1574 extent_for_each_entry(e, entry) {
1575 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
1576 return "invalid extent entry type";
1578 switch (extent_entry_type(entry)) {
1579 case BCH_EXTENT_ENTRY_ptr:
1580 ptr = entry_to_ptr(entry);
1582 reason = extent_ptr_invalid(c, e, &entry->ptr,
1583 size_ondisk, false);
1587 case BCH_EXTENT_ENTRY_crc32:
1588 case BCH_EXTENT_ENTRY_crc64:
1589 case BCH_EXTENT_ENTRY_crc128:
1590 crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
1592 if (crc.offset + e.k->size >
1593 crc.uncompressed_size)
1594 return "checksum offset + key size > uncompressed size";
1596 size_ondisk = crc.compressed_size;
1598 if (!bch2_checksum_type_valid(c, crc.csum_type))
1599 return "invalid checksum type";
1601 if (crc.compression_type >= BCH_COMPRESSION_NR)
1602 return "invalid compression type";
1604 if (bch2_csum_type_is_encryption(crc.csum_type)) {
1605 if (nonce == UINT_MAX)
1606 nonce = crc.offset + crc.nonce;
1607 else if (nonce != crc.offset + crc.nonce)
1608 return "incorrect nonce";
1611 case BCH_EXTENT_ENTRY_stripe_ptr:
1619 case BCH_RESERVATION: {
1620 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
1622 if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
1623 return "incorrect value size";
1625 if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
1626 return "invalid nr_replicas";
1632 return "invalid value type";
1636 static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
1637 struct bkey_s_c_extent e)
1639 const struct bch_extent_ptr *ptr;
1641 struct bucket_mark mark;
1642 unsigned seq, stale;
1645 unsigned replicas = 0;
1648 * XXX: we should be doing most/all of these checks at startup time,
1649 * where we check bch2_bkey_invalid() in btree_node_read_done()
1651 * But note that we can't check for stale pointers or incorrect gc marks
1652 * until after journal replay is done (it might be an extent that's
1653 * going to get overwritten during replay)
1656 extent_for_each_ptr(e, ptr) {
1657 ca = bch_dev_bkey_exists(c, ptr->dev);
1661 * If journal replay hasn't finished, we might be seeing keys
1662 * that will be overwritten by the time journal replay is done:
1664 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1670 seq = read_seqcount_begin(&c->gc_pos_lock);
1671 mark = ptr_bucket_mark(ca, ptr);
1673 /* between mark and bucket gen */
1676 stale = ptr_stale(ca, ptr);
1678 bch2_fs_bug_on(stale && !ptr->cached, c,
1679 "stale dirty pointer");
1681 bch2_fs_bug_on(stale > 96, c,
1682 "key too stale: %i",
1688 bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
1689 (mark.data_type != BCH_DATA_USER ||
1691 ? mark.cached_sectors
1692 : mark.dirty_sectors));
1693 } while (read_seqcount_retry(&c->gc_pos_lock, seq));
1699 if (replicas > BCH_REPLICAS_MAX) {
1700 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1703 "extent key bad (too many replicas: %u): %s",
1708 if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
1709 !bch2_bkey_replicas_marked(c, btree_node_type(b),
1711 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1714 "extent key bad (replicas not marked in superblock):\n%s",
1722 bch2_bkey_val_to_text(&PBUF(buf), c, btree_node_type(b),
1724 bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
1725 "gen %i type %u", buf,
1726 PTR_BUCKET_NR(ca, ptr), mark.gen, mark.data_type);
1729 void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
1731 switch (k.k->type) {
1733 case BCH_EXTENT_CACHED:
1734 bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
1736 case BCH_RESERVATION:
1743 void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
1746 const char *invalid;
1748 if (bkey_extent_is_data(k.k))
1749 extent_print_ptrs(out, c, bkey_s_c_to_extent(k));
1751 invalid = bch2_extent_invalid(c, k);
1753 pr_buf(out, " invalid: %s", invalid);
1756 static void bch2_extent_crc_init(union bch_extent_crc *crc,
1757 struct bch_extent_crc_unpacked new)
1759 #define common_fields(_crc) \
1760 .csum_type = _crc.csum_type, \
1761 .compression_type = _crc.compression_type, \
1762 ._compressed_size = _crc.compressed_size - 1, \
1763 ._uncompressed_size = _crc.uncompressed_size - 1, \
1764 .offset = _crc.offset
1766 if (bch_crc_bytes[new.csum_type] <= 4 &&
1767 new.uncompressed_size <= CRC32_SIZE_MAX &&
1768 new.nonce <= CRC32_NONCE_MAX) {
1769 crc->crc32 = (struct bch_extent_crc32) {
1770 .type = 1 << BCH_EXTENT_ENTRY_crc32,
1772 .csum = *((__le32 *) &new.csum.lo),
1777 if (bch_crc_bytes[new.csum_type] <= 10 &&
1778 new.uncompressed_size <= CRC64_SIZE_MAX &&
1779 new.nonce <= CRC64_NONCE_MAX) {
1780 crc->crc64 = (struct bch_extent_crc64) {
1781 .type = 1 << BCH_EXTENT_ENTRY_crc64,
1784 .csum_lo = new.csum.lo,
1785 .csum_hi = *((__le16 *) &new.csum.hi),
1790 if (bch_crc_bytes[new.csum_type] <= 16 &&
1791 new.uncompressed_size <= CRC128_SIZE_MAX &&
1792 new.nonce <= CRC128_NONCE_MAX) {
1793 crc->crc128 = (struct bch_extent_crc128) {
1794 .type = 1 << BCH_EXTENT_ENTRY_crc128,
1801 #undef common_fields
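/*
 * i.e. bch2_extent_crc_init() picks the smallest on-disk crc entry that can
 * hold the checksum, sizes and nonce: crc32 for checksums of up to 4 bytes,
 * crc64 for up to 10 bytes, crc128 for up to 16.
 */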
1805 void bch2_extent_crc_append(struct bkey_i_extent *e,
1806 struct bch_extent_crc_unpacked new)
1808 bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
1809 __extent_entry_push(e);
1812 static inline void __extent_entry_insert(struct bkey_i_extent *e,
1813 union bch_extent_entry *dst,
1814 union bch_extent_entry *new)
1816 union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));
1818 memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
1819 dst, (u64 *) end - (u64 *) dst);
1820 e->k.u64s += extent_entry_u64s(new);
1821 memcpy_u64s_small(dst, new, extent_entry_u64s(new));
1824 void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
1825 struct extent_ptr_decoded *p)
1827 struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(&e->k, NULL);
1828 union bch_extent_entry *pos;
1831 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
1836 extent_for_each_crc(extent_i_to_s(e), crc, pos)
1837 if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
1838 pos = extent_entry_next(pos);
1842 bch2_extent_crc_append(e, p->crc);
1843 pos = extent_entry_last(extent_i_to_s(e));
1845 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
1846 __extent_entry_insert(e, pos, to_entry(&p->ptr));
1848 for (i = 0; i < p->ec_nr; i++) {
1849 p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
1850 __extent_entry_insert(e, pos, to_entry(&p->ec[i]));
1855 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
1857 * Returns true if @k should be dropped entirely
1859 * For existing keys, only called when btree nodes are being rewritten, not when
1860 * they're merely being compacted/resorted in memory.
1862 bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
1864 struct bkey_s_extent e;
1866 switch (k.k->type) {
1867 case KEY_TYPE_ERROR:
1870 case KEY_TYPE_DELETED:
1872 case KEY_TYPE_DISCARD:
1873 return bversion_zero(k.k->version);
1874 case KEY_TYPE_COOKIE:
1878 case BCH_EXTENT_CACHED:
1879 e = bkey_s_to_extent(k);
1881 bch2_extent_drop_stale(c, e);
1883 if (!bkey_val_u64s(e.k)) {
1884 if (bkey_extent_is_cached(e.k)) {
1885 k.k->type = KEY_TYPE_DISCARD;
1886 if (bversion_zero(k.k->version))
1889 k.k->type = KEY_TYPE_ERROR;
1894 case BCH_RESERVATION:
1901 void bch2_extent_mark_replicas_cached(struct bch_fs *c,
1902 struct bkey_s_extent e,
1904 unsigned nr_desired_replicas)
1906 union bch_extent_entry *entry;
1907 struct extent_ptr_decoded p;
1908 int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;
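/*
 * e.g. with nr_desired_replicas 2 and pointers whose durability sums to 3,
 * extra is 1: one durability-1 pointer, preferably one outside @target, is
 * flipped to cached below:
 */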
1910 if (target && extra > 0)
1911 extent_for_each_ptr_decode(e, p, entry) {
1912 int n = bch2_extent_ptr_durability(c, p);
1914 if (n && n <= extra &&
1915 !bch2_dev_in_target(c, p.ptr.dev, target)) {
1916 entry->ptr.cached = true;
1922 extent_for_each_ptr_decode(e, p, entry) {
1923 int n = bch2_extent_ptr_durability(c, p);
1925 if (n && n <= extra) {
1926 entry->ptr.cached = true;
1933 * This picks a non-stale pointer to read from, taking the IO failures
1934 * recorded in @failed (which may be NULL) into account: pointers on devices
1935 * that have recently failed are deprioritized or skipped.
1937 int bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
1938 struct bch_io_failures *failed,
1939 struct extent_ptr_decoded *pick)
1943 switch (k.k->type) {
1944 case KEY_TYPE_ERROR:
1948 case BCH_EXTENT_CACHED:
1949 ret = extent_pick_read_device(c, bkey_s_c_to_extent(k),
1952 if (!ret && !bkey_extent_is_cached(k.k))
1962 enum merge_result bch2_extent_merge(struct bch_fs *c, struct btree *b,
1963 struct bkey_i *l, struct bkey_i *r)
1965 struct bkey_s_extent el, er;
1966 union bch_extent_entry *en_l, *en_r;
1968 if (key_merging_disabled(c))
1969 return BCH_MERGE_NOMERGE;
1972 * Generic header checks
1973 * Assumes left and right are in order
1974 * Left and right must be exactly aligned
1977 if (l->k.u64s != r->k.u64s ||
1978 l->k.type != r->k.type ||
1979 bversion_cmp(l->k.version, r->k.version) ||
1980 bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
1981 return BCH_MERGE_NOMERGE;
1983 switch (l->k.type) {
1984 case KEY_TYPE_DISCARD:
1985 case KEY_TYPE_ERROR:
1986 /* These types are mergeable, and no val to check */
1990 case BCH_EXTENT_CACHED:
1991 el = bkey_i_to_s_extent(l);
1992 er = bkey_i_to_s_extent(r);
1994 extent_for_each_entry(el, en_l) {
1995 struct bch_extent_ptr *lp, *rp;
1998 en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
2000 if ((extent_entry_type(en_l) !=
2001 extent_entry_type(en_r)) ||
2002 !extent_entry_is_ptr(en_l))
2003 return BCH_MERGE_NOMERGE;
2008 if (lp->offset + el.k->size != rp->offset ||
2009 lp->dev != rp->dev ||
2011 return BCH_MERGE_NOMERGE;
2013 /* We don't allow extents to straddle buckets: */
2014 ca = bch_dev_bkey_exists(c, lp->dev);
2016 if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
2017 return BCH_MERGE_NOMERGE;
2021 case BCH_RESERVATION: {
2022 struct bkey_i_reservation *li = bkey_i_to_reservation(l);
2023 struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
2025 if (li->v.generation != ri->v.generation ||
2026 li->v.nr_replicas != ri->v.nr_replicas)
2027 return BCH_MERGE_NOMERGE;
2031 return BCH_MERGE_NOMERGE;
2034 l->k.needs_whiteout |= r->k.needs_whiteout;
2036 /* Keys with no pointers aren't restricted to one bucket and could overflow KEY_SIZE_MAX: */
2039 if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
2040 bch2_key_resize(&l->k, KEY_SIZE_MAX);
2041 bch2_cut_front(l->k.p, r);
2042 return BCH_MERGE_PARTIAL;
2045 bch2_key_resize(&l->k, l->k.size + r->k.size);
2047 return BCH_MERGE_MERGE;
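/*
 * e.g. extents [0, 8) and [8, 16) whose pointers are adjacent within the same
 * bucket on the same device (and which match in type, version and value size)
 * merge into [0, 16); if the combined size would exceed KEY_SIZE_MAX, the left
 * key is grown to KEY_SIZE_MAX and BCH_MERGE_PARTIAL is returned instead.
 */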
2051 * When merging an extent that we're inserting into a btree node, the new merged
2052 * extent could overlap with an existing 0 size extent - if we don't fix that,
2053 * it'll break the btree node iterator so this code finds those 0 size extents
2054 * and shifts them out of the way.
2056 * Also unpacks and repacks.
2058 static bool bch2_extent_merge_inline(struct bch_fs *c,
2059 struct btree_iter *iter,
2060 struct bkey_packed *l,
2061 struct bkey_packed *r,
2064 struct btree *b = iter->l[0].b;
2065 struct btree_node_iter *node_iter = &iter->l[0].iter;
2066 BKEY_PADDED(k) li, ri;
2067 struct bkey_packed *m = back_merge ? l : r;
2068 struct bkey_i *mi = back_merge ? &li.k : &ri.k;
2069 struct bset_tree *t = bch2_bkey_to_bset(b, m);
2070 enum merge_result ret;
2072 EBUG_ON(bkey_written(b, m));
2075 * We need to save copies of both l and r, because we might get a
2076 * partial merge (which modifies both) and then fail to repack
2078 bch2_bkey_unpack(b, &li.k, l);
2079 bch2_bkey_unpack(b, &ri.k, r);
2081 ret = bch2_extent_merge(c, b, &li.k, &ri.k);
2082 if (ret == BCH_MERGE_NOMERGE)
2086 * check if we overlap with deleted extents - that would break the sort order:
2090 struct bkey_packed *n = bkey_next(m);
2092 if (n != btree_bkey_last(b, t) &&
2093 bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
2096 } else if (ret == BCH_MERGE_MERGE) {
2097 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
2100 bkey_cmp_left_packed_byval(b, prev,
2101 bkey_start_pos(&li.k.k)) > 0)
2105 if (ret == BCH_MERGE_PARTIAL) {
2106 if (!extent_i_save(b, m, mi))
2110 bkey_copy(packed_to_bkey(l), &li.k);
2112 bkey_copy(packed_to_bkey(r), &ri.k);
2114 if (!extent_i_save(b, m, &li.k))
2118 bch2_bset_fix_invalidated_key(b, m);
2119 bch2_btree_node_iter_fix(iter, b, node_iter,
2120 m, m->u64s, m->u64s);
2121 verify_modified_extent(iter, m);
2123 return ret == BCH_MERGE_MERGE;
2126 int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size)
2128 struct btree_iter iter;
2129 struct bpos end = pos;
2135 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
2136 BTREE_ITER_SLOTS, k) {
2137 if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
2140 if (!bch2_extent_is_fully_allocated(k)) {
2145 bch2_btree_iter_unlock(&iter);