// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "dirent.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"
#include "xattr.h"

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	unsigned nr_ptrs = 0;

	bkey_for_each_ptr(p, ptr)
		nr_ptrs++;

	return nr_ptrs;
}

unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
{
	unsigned nr_ptrs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_extent: {
		struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
		const struct bch_extent_ptr *ptr;

		bkey_for_each_ptr(p, ptr)
			nr_ptrs += !ptr->cached;
		break;
	}
	case KEY_TYPE_reservation:
		nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
		break;
	}

	return nr_ptrs;
}

static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned i, durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

	for (i = 0; i < p.ec_nr; i++) {
		struct stripe *s =
			genradix_ptr(&c->stripes[0], p.idx);

		if (WARN_ON(!s))
			continue;

		durability = max_t(unsigned, durability, s->nr_redundant);
	}

	return durability;
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}

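/*
 * Worked example for the two durability helpers above (values are
 * illustrative, not taken from this tree): an extent with two non-cached
 * pointers to devices whose member info reports durability 1 has durability
 * 1 + 1 = 2; if one of those pointers also carries a stripe pointer whose
 * stripe has nr_redundant == 1, that pointer's durability is max(1, 1) = 1,
 * so the key's total durability is still 2.
 */
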
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}

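/*
 * Summary of how this interacts with bch2_bkey_pick_read_device() (inferred
 * from the code, not an original comment): idx 0 means "read this replica
 * directly", idx 1..ec_nr select erasure coding reconstruction paths. Since
 * nr_retries is left at 0 here, one recorded failure is enough for the picker
 * to move on to idx + 1 for that device; a failure at a new idx resets the
 * failure count for that device.
 */
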
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (force_reconstruct_read(c))
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}

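/*
 * Illustration of the biased pick above (made-up latencies): with l1 = 1 ms
 * and l2 = 3 ms, bch2_rand_range(4 ms) exceeds l1 three times out of four,
 * so the faster device p1 is chosen with probability ~75% and p2 with ~25%.
 */
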
/*
 * This picks a non-stale pointer, preferably from a device other than those
 * recorded in @failed. @failed may be NULL, meaning pick any. If there are no
 * non-stale pointers to other devices, it will still pick a pointer from a
 * failed device.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (force_reconstruct_read(c) &&
		    !p.idx && p.ec_nr)
			p.idx++;

		if (p.idx >= p.ec_nr + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

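/*
 * Return value convention, as inferred from the code above rather than an
 * existing comment: > 0 means *pick is valid, 0 means no usable pointers were
 * found (e.g. every cached pointer was stale), and -EIO means the key has
 * dirty pointers (or is a KEY_TYPE_error key) but nothing could be read.
 */
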
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->u64s++;
		break;
	default:
		BUG();
	}
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

/* extent specific utility code */

const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.group &&
		    ca->mi.group - 1 == group)
			return ptr;
	}

	return NULL;
}

const struct bch_extent_ptr *
bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return ptr;

	return NULL;
}

unsigned bch2_extent_is_compressed(struct bkey_s_c k)
{
	unsigned ret = 0;

	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		extent_for_each_ptr_decode(e, p, entry)
			if (!p.ptr.cached &&
			    p.crc.compression_type != BCH_COMPRESSION_NONE)
				ret += p.crc.compressed_size;
	}
	}

	return ret;
}

bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
			     struct bch_extent_ptr m, u64 offset)
{
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	extent_for_each_ptr_decode(e, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}

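/*
 * Rough sketch of the logic above: dropping a pointer can leave behind a crc
 * entry that no longer describes any pointer. We look at the entry following
 * the dropped pointer - if it is another pointer, the preceding crc is still
 * in use and must stay. Otherwise we walk backwards from the pointer; stripe
 * pointer entries are dropped along with it, and the preceding crc entry is
 * dropped too once nothing else references it.
 */
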
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !u.compression_type &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
				 struct bch_extent_crc_unpacked n)
{
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	extent_for_each_crc(e, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
			     struct bch_extent_crc_unpacked n)
{
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.live_size) {
		extent_for_each_crc(extent_i_to_s(e), u, i)
			if (!u.compression_type &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(n.compression_type);
	BUG_ON(n.offset);
	BUG_ON(n.live_size != e->k.size);

restart_narrow_pointers:
	extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(extent_i_to_s(e).s, &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(e, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

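/*
 * Illustrative narrowing example (sizes invented): an uncompressed,
 * checksummed extent was written as 128 sectors but only sectors 64..127 are
 * still live (crc.offset == 64, live_size == 64). Once a checksum of just the
 * live 64 sectors is available, each pointer that can be narrowed is advanced
 * by crc.offset and given the new crc, so readers no longer have to read and
 * checksum the dead first half.
 */
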
/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
	union bch_extent_entry *entry;
	u64 *d = (u64 *) bkeyp_val(f, k);
	unsigned i;

	for (i = 0; i < bkeyp_val_u64s(f, k); i++)
		d[i] = swab64(d[i]);

	for (entry = (union bch_extent_entry *) d;
	     entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}

static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (ptr->dev >= c->sb.nr_devices ||
	    !c->devs[ptr->dev])
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}

static void bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

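/*
 * Sample of the output format produced above, with invented values:
 *
 *   crc: c_size 8 size 16 offset 0 nonce 0 csum 1 compress 2 ptr: 0:1024 gen 5
 *
 * Entries are printed in the order they appear in the value, separated by a
 * single space.
 */
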
/* KEY_TYPE_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	const struct bch_extent_ptr *ptr;
	const char *reason;

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (!extent_entry_is_ptr(entry))
			return "has non ptr field";
	}

	bkey_for_each_ptr(ptrs, ptr) {
		reason = extent_ptr_invalid(c, k, ptr,
					    c->opts.btree_node_size,
					    true);
		if (reason)
			return reason;
	}

	return NULL;
}

void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
			       struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;

	bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		       !bch2_bkey_replicas_marked(c, k, false), c,
		       "btree key bad (replicas not marked in superblock):\n%s",
		       (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));

	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	bkey_for_each_ptr(ptrs, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		mark = ptr_bucket_mark(ca, ptr);

		err = "stale";
		if (gen_after(mark.gen, ptr->gen))
			goto err;

		err = "inconsistent";
		if (mark.data_type != BCH_DATA_BTREE ||
		    mark.dirty_sectors < c->opts.btree_node_size)
			goto err;
	}

	return;
err:
	bch2_bkey_val_to_text(&PBUF(buf), c, k);
	bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
		    err, buf, PTR_BUCKET_NR(ca, ptr),
		    mark.gen, (unsigned) mark.v.counter);
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	const char *invalid;

	bkey_ptrs_to_text(out, c, k);

	invalid = bch2_btree_ptr_invalid(c, k);
	if (invalid)
		pr_buf(out, " invalid: %s", invalid);
}

bool __bch2_cut_front(struct bpos where, struct bkey_s k)
{
	u64 len = 0;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return false;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	len = k.k->p.offset - where.offset;

	BUG_ON(len > k.k->size);

	/*
	 * Don't readjust offset if the key size is now 0, because that could
	 * cause offset to point to the next bucket:
	 */
	if (!len)
		k.k->type = KEY_TYPE_deleted;
	else if (bkey_extent_is_data(k.k)) {
		struct bkey_s_extent e = bkey_s_to_extent(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		extent_for_each_entry(e, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += e.k->size - len;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}
	}

	k.k->size = len;

	return true;
}

bool bch2_cut_back(struct bpos where, struct bkey *k)
{
	u64 len = 0;

	if (bkey_cmp(where, k->p) >= 0)
		return false;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);

	len = where.offset - bkey_start_offset(k);

	BUG_ON(len > k->size);

	k->p = where;
	k->size = len;

	if (!len)
		k->type = KEY_TYPE_deleted;

	return true;
}

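/*
 * Example (numbers invented): for an extent covering sectors [0, 128)
 * (p.offset == 128, size == 128), cutting the front at offset 64 with
 * __bch2_cut_front() leaves [64, 128) - size becomes 64 and pointer/crc
 * offsets advance by 64 - while cutting the back at offset 64 with
 * bch2_cut_back() leaves [0, 64), with p.offset and size both set to 64.
 */
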
/**
 * bch_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) will be preserved, modifies where the extent ends
 */
void bch2_key_resize(struct bkey *k,
		     unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}

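/*
 * e.g. (hypothetical values) a key ending at offset 100 with size 20, resized
 * to 8, ends up with p.offset == 88 and size == 8; the start offset (80) is
 * unchanged.
 */
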
static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
			  struct bkey_i *src)
{
	struct bkey_format *f = &b->format;
	struct bkey_i *dst_unpacked;
	struct bkey_packed tmp;

	if ((dst_unpacked = packed_to_bkey(dst)))
		dst_unpacked->k = src->k;
	else if (bch2_bkey_pack_key(&tmp, &src->k, f))
		memcpy_u64s(dst, &tmp, f->key_u64s);
	else
		return false;

	memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
	return true;
}

struct extent_insert_state {
	struct btree_insert		*trans;
	struct btree_insert_entry	*insert;
	struct bpos			committed;

	/* for deleting: */
	struct bkey_i			whiteout;
	bool				update_journal;
	bool				update_btree;
	bool				deleting;
};

static bool bch2_extent_merge_inline(struct bch_fs *,
				     struct btree_iter *,
				     struct bkey_packed *,
				     struct bkey_packed *,
				     bool);

static void verify_extent_nonoverlapping(struct btree *b,
					 struct btree_node_iter *_iter,
					 struct bkey_i *insert)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_node_iter iter;
	struct bkey_packed *k;
	struct bkey uk;

	iter = *_iter;
	k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
	BUG_ON(k &&
	       (uk = bkey_unpack_key(b, k),
		bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));

	iter = *_iter;
	k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_discard);
#if 0
	BUG_ON(k &&
	       (uk = bkey_unpack_key(b, k),
		bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
#else
	if (k &&
	    (uk = bkey_unpack_key(b, k),
	     bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
		char buf1[100];
		char buf2[100];

		bch2_bkey_to_text(&PBUF(buf1), &insert->k);
		bch2_bkey_to_text(&PBUF(buf2), &uk);

		bch2_dump_btree_node(b);
		panic("insert > next :\n"
		      "insert %s\n"
		      "next   %s\n",
		      buf1, buf2);
	}
#endif

#endif
}

static void verify_modified_extent(struct btree_iter *iter,
				   struct bkey_packed *k)
{
	bch2_btree_iter_verify(iter, iter->l[0].b);
	bch2_verify_insert_pos(iter->l[0].b, k, k, k->u64s);
}

static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
			       struct bkey_i *insert)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter;
	struct bkey_packed *k;

	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));

	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
	verify_extent_nonoverlapping(l->b, &l->iter, insert);

	node_iter = l->iter;
	k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
	if (k && !bkey_written(l->b, k) &&
	    bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
		return;

	node_iter = l->iter;
	k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_discard);
	if (k && !bkey_written(l->b, k) &&
	    bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
		return;

	k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));

	bch2_bset_insert(l->b, &l->iter, k, insert, 0);
	bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
	bch2_btree_iter_verify(iter, l->b);
}

static void extent_insert_committed(struct extent_insert_state *s)
{
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct bkey_i *insert = s->insert->k;
	BKEY_PADDED(k) split;

	EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
	EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);

	bkey_copy(&split.k, insert);
	if (s->deleting)
		split.k.k.type = KEY_TYPE_discard;

	bch2_cut_back(s->committed, &split.k.k);

	if (!bkey_cmp(s->committed, iter->pos))
		return;

	bch2_btree_iter_set_pos_same_leaf(iter, s->committed);

	if (s->update_btree) {
		if (debug_check_bkeys(c))
			bch2_bkey_debugcheck(c, iter->l[0].b,
					     bkey_i_to_s_c(&split.k));

		EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);

		extent_bset_insert(c, iter, &split.k);
	}

	if (s->update_journal) {
		bkey_copy(&split.k, !s->deleting ? insert : &s->whiteout);
		if (s->deleting)
			split.k.k.type = KEY_TYPE_discard;

		bch2_cut_back(s->committed, &split.k.k);

		EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);

		bch2_btree_journal_key(s->trans, iter, &split.k);
	}

	bch2_cut_front(s->committed, insert);

	insert->k.needs_whiteout = false;
}

void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct btree *b = iter->l[0].b;

	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);

	bch2_cut_back(b->key.k.p, &k->k);

	BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
}

enum btree_insert_ret
bch2_extent_can_insert(struct btree_insert *trans,
		       struct btree_insert_entry *insert,
		       unsigned *u64s)
{
	struct btree_iter_level *l = &insert->iter->l[0];
	struct btree_node_iter node_iter = l->iter;
	enum bch_extent_overlap overlap;
	struct bkey_packed *_k;
	struct bkey unpacked;
	struct bkey_s_c k;
	unsigned sectors;

	BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
	       !bch2_extent_is_atomic(&insert->k->k, insert->iter));

	/*
	 * We avoid creating whiteouts whenever possible when deleting, but
	 * those optimizations mean we may potentially insert two whiteouts
	 * instead of one (when we overlap with the front of one extent and the
	 * back of another):
	 */
	if (bkey_whiteout(&insert->k->k))
		*u64s += BKEY_U64s;

	_k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
					      KEY_TYPE_discard);
	if (!_k)
		return BTREE_INSERT_OK;

	k = bkey_disassemble(l->b, _k, &unpacked);

	overlap = bch2_extent_overlap(&insert->k->k, k.k);

	/* account for having to split existing extent: */
	if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
		*u64s += _k->u64s;

	if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
	    (sectors = bch2_extent_is_compressed(k))) {
		int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;

		if (trans->flags & BTREE_INSERT_NOFAIL)
			flags |= BCH_DISK_RESERVATION_NOFAIL;

		switch (bch2_disk_reservation_add(trans->c,
				trans->disk_res,
				sectors, flags)) {
		case 0:
			break;
		case -ENOSPC:
			return BTREE_INSERT_ENOSPC;
		case -EINTR:
			return BTREE_INSERT_NEED_GC_LOCK;
		default:
			BUG();
		}
	}

	return BTREE_INSERT_OK;
}

static void
extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
	      struct bkey_packed *_k, struct bkey_s k,
	      enum bch_extent_overlap overlap)
{
	struct bch_fs *c = s->trans->c;
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];

	switch (overlap) {
	case BCH_EXTENT_OVERLAP_FRONT:
		/* insert overlaps with start of k: */
		__bch2_cut_front(insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(l->b, _k, k.k);
		verify_modified_extent(iter, _k);
		break;

	case BCH_EXTENT_OVERLAP_BACK:
		/* insert overlaps with end of k: */
		bch2_cut_back(bkey_start_pos(&insert->k), k.k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(l->b, _k, k.k);

		/*
		 * As the auxiliary tree is indexed by the end of the
		 * key and we've just changed the end, update the
		 * auxiliary tree.
		 */
		bch2_bset_fix_invalidated_key(l->b, _k);
		bch2_btree_node_iter_fix(iter, l->b, &l->iter,
					 _k, _k->u64s, _k->u64s);
		verify_modified_extent(iter, _k);
		break;

	case BCH_EXTENT_OVERLAP_ALL: {
		/* The insert key completely covers k, invalidate k */
		if (!bkey_whiteout(k.k))
			btree_account_key_drop(l->b, _k);

		k.k->size = 0;
		k.k->type = KEY_TYPE_deleted;

		if (_k >= btree_bset_last(l->b)->start) {
			unsigned u64s = _k->u64s;

			bch2_bset_delete(l->b, _k, _k->u64s);
			bch2_btree_node_iter_fix(iter, l->b, &l->iter,
						 _k, u64s, 0);
			bch2_btree_iter_verify(iter, l->b);
		} else {
			extent_save(l->b, _k, k.k);
			bch2_btree_node_iter_fix(iter, l->b, &l->iter,
						 _k, _k->u64s, _k->u64s);
			verify_modified_extent(iter, _k);
		}

		break;
	}
	case BCH_EXTENT_OVERLAP_MIDDLE: {
		BKEY_PADDED(k) split;
		/*
		 * The insert key falls 'in the middle' of k
		 * The insert key splits k in 3:
		 * - start only in k, preserve
		 * - middle common section, invalidate in k
		 * - end only in k, preserve
		 *
		 * We update the old key to preserve the start,
		 * insert will be the new common section,
		 * we manually insert the end that we are preserving.
		 *
		 * modify k _before_ doing the insert (which will move
		 * what k points to)
		 */
		bkey_reassemble(&split.k, k.s_c);
		split.k.k.needs_whiteout |= bkey_written(l->b, _k);

		bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
		BUG_ON(bkey_deleted(&split.k.k));

		__bch2_cut_front(insert->k.p, k);
		BUG_ON(bkey_deleted(k.k));
		extent_save(l->b, _k, k.k);
		verify_modified_extent(iter, _k);

		extent_bset_insert(c, iter, &split.k);
		break;
	}
	}
}

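/*
 * The four overlap cases handled above, with @insert drawn below the existing
 * key @k (diagram added for illustration):
 *
 *   FRONT:  k:          |--------|      BACK:   k:      |--------|
 *           insert:     |----|                  insert:     |----|
 *
 *   ALL:    k:            |----|        MIDDLE: k:      |------------|
 *           insert:     |--------|              insert:     |----|
 */
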
static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
{
	struct btree_iter *iter = s->insert->iter;
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_packed *_k;
	struct bkey unpacked;
	struct bkey_i *insert = s->insert->k;

	while (bkey_cmp(s->committed, insert->k.p) < 0 &&
	       (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
						      KEY_TYPE_discard))) {
		struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
		enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);

		EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);

		if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
			break;

		s->committed = bpos_min(s->insert->k->k.p, k.k->p);

		if (!bkey_whiteout(k.k))
			s->update_journal = true;

		if (!s->update_journal) {
			bch2_cut_front(s->committed, insert);
			bch2_cut_front(s->committed, &s->whiteout);
			bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
			goto next;
		}

		/*
		 * When deleting, if possible just do it by switching the type
		 * of the key we're deleting, instead of creating and inserting
		 * a new whiteout:
		 */
		if (s->deleting &&
		    !s->update_btree &&
		    !bkey_cmp(insert->k.p, k.k->p) &&
		    !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
			if (!bkey_whiteout(k.k)) {
				btree_account_key_drop(l->b, _k);
				_k->type = KEY_TYPE_discard;
				reserve_whiteout(l->b, _k);
			}
			break;
		}

		if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
			insert->k.needs_whiteout = true;
			s->update_btree = true;
		}

		if (s->update_btree &&
		    overlap == BCH_EXTENT_OVERLAP_ALL &&
		    bkey_whiteout(k.k) &&
		    k.k->needs_whiteout) {
			unreserve_whiteout(l->b, _k);
			_k->needs_whiteout = false;
		}

		extent_squash(s, insert, _k, k, overlap);

		if (!s->update_btree)
			bch2_cut_front(s->committed, insert);
next:
		if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
		    overlap == BCH_EXTENT_OVERLAP_MIDDLE)
			break;
	}

	if (bkey_cmp(s->committed, insert->k.p) < 0)
		s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);

	/*
	 * may have skipped past some deleted extents greater than the insert
	 * key, before we got to a non deleted extent and knew we could bail out
	 * rewind the iterator a bit if necessary:
	 */
	{
		struct btree_node_iter node_iter = l->iter;

		while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
		       bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
			l->iter = node_iter;
	}
}

/**
 * bch_extent_insert_fixup - insert a new extent and deal with overlaps
 *
 * this may result in not actually doing the insert, or inserting some subset
 * of the insert key. For cmpxchg operations this is where that logic lives.
 *
 * All subsets of @insert that need to be inserted are inserted using
 * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
 * returns false, setting @iter->pos for the prefix of @insert that actually got
 * inserted.
 *
 * BSET INVARIANTS: this function is responsible for maintaining all the
 * invariants for bsets of extents in memory. things get really hairy with 0
 * size extents
 *
 * within one bset:
 *
 * bkey_start_pos(bkey_next(k)) >= k
 * or bkey_start_offset(bkey_next(k)) >= k->offset
 *
 * i.e. strict ordering, no overlapping extents.
 *
 * multiple bsets (i.e. full btree node):
 *
 * ∀ k, j
 *   k.size != 0 ∧ j.size != 0 →
 *     ¬ (k > bkey_start_pos(j) ∧ k < j)
 *
 * i.e. no two overlapping keys _of nonzero size_
 *
 * We can't realistically maintain this invariant for zero size keys because of
 * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
 * there may be another 0 size key between them in another bset, and it will
 * thus overlap with the merged key.
 *
 * In addition, the end of iter->pos indicates how much has been processed.
 * If the end of iter->pos is not the same as the end of insert, then
 * key insertion needs to continue/be retried.
 */
enum btree_insert_ret
bch2_insert_fixup_extent(struct btree_insert *trans,
			 struct btree_insert_entry *insert)
{
	struct btree_iter *iter	= insert->iter;
	struct btree *b		= iter->l[0].b;
	struct extent_insert_state s = {
		.trans		= trans,
		.insert		= insert,
		.committed	= iter->pos,

		.whiteout	= *insert->k,
		.update_journal	= !bkey_whiteout(&insert->k->k),
		.update_btree	= !bkey_whiteout(&insert->k->k),
		.deleting	= bkey_whiteout(&insert->k->k),
	};

	EBUG_ON(iter->level);
	EBUG_ON(!insert->k->k.size);

	/*
	 * As we process overlapping extents, we advance @iter->pos both to
	 * signal to our caller (btree_insert_key()) how much of @insert->k has
	 * been inserted, and also to keep @iter->pos consistent with
	 * @insert->k and the node iterator that we're advancing:
	 */
	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));

	__bch2_insert_fixup_extent(&s);

	extent_insert_committed(&s);

	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
	EBUG_ON(bkey_cmp(iter->pos, s.committed));

	if (insert->k->k.size) {
		/* got to the end of this leaf node */
		BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
		return BTREE_INSERT_NEED_TRAVERSE;
	}

	return BTREE_INSERT_OK;
}

const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	unsigned size_ondisk = e.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;

	if (bkey_val_u64s(e.k) > BKEY_EXTENT_VAL_U64s_MAX)
		return "value too big";

	extent_for_each_entry(e, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);

			reason = extent_ptr_invalid(c, e.s_c, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));

			if (crc.offset + e.k->size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	return NULL;
}

void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	char buf[160];

	/*
	 * XXX: we should be doing most/all of these checks at startup time,
	 * where we check bch2_bkey_invalid() in btree_node_read_done()
	 *
	 * But note that we can't check for stale pointers or incorrect gc marks
	 * until after journal replay is done (it might be an extent that's
	 * going to get overwritten during replay)
	 */

	bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		       !bch2_bkey_replicas_marked(c, e.s_c, false), c,
		       "extent key bad (replicas not marked in superblock):\n%s",
		       (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));

	/*
	 * If journal replay hasn't finished, we might be seeing keys
	 * that will be overwritten by the time journal replay is done:
	 */
	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		return;

	extent_for_each_ptr_decode(e, p, entry) {
		struct bch_dev *ca	= bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
		unsigned stale		= gen_after(mark.gen, p.ptr.gen);
		unsigned disk_sectors	= ptr_disk_sectors(p);
		unsigned mark_sectors	= p.ptr.cached
			? mark.cached_sectors
			: mark.dirty_sectors;

		bch2_fs_bug_on(stale && !p.ptr.cached, c,
			       "stale dirty pointer (ptr gen %u bucket %u",
			       p.ptr.gen, mark.gen);

		bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);

		bch2_fs_bug_on(!stale &&
			       (mark.data_type != BCH_DATA_USER ||
				mark_sectors < disk_sectors), c,
			       "extent pointer not marked: %s:\n"
			       "type %u sectors %u < %u",
			       (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
			       mark.data_type,
			       mark_sectors, disk_sectors);
	}
}

void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	const char *invalid;

	bkey_ptrs_to_text(out, c, k);

	invalid = bch2_extent_invalid(c, k);
	if (invalid)
		pr_buf(out, " invalid: %s", invalid);
}

static void bch2_extent_crc_init(union bch_extent_crc *crc,
				 struct bch_extent_crc_unpacked new)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		._compressed_size	= _crc.compressed_size - 1,	\
		._uncompressed_size	= _crc.uncompressed_size - 1,	\
		.offset			= _crc.offset

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX) {
		crc->crc32 = (struct bch_extent_crc32) {
			.type = 1 << BCH_EXTENT_ENTRY_crc32,
			common_fields(new),
			.csum			= *((__le32 *) &new.csum.lo),
		};
		return;
	}

	if (bch_crc_bytes[new.csum_type]	<= 10 &&
	    new.uncompressed_size		<= CRC64_SIZE_MAX &&
	    new.nonce				<= CRC64_NONCE_MAX) {
		crc->crc64 = (struct bch_extent_crc64) {
			.type = 1 << BCH_EXTENT_ENTRY_crc64,
			common_fields(new),
			.nonce			= new.nonce,
			.csum_lo		= new.csum.lo,
			.csum_hi		= *((__le16 *) &new.csum.hi),
		};
		return;
	}

	if (bch_crc_bytes[new.csum_type]	<= 16 &&
	    new.uncompressed_size		<= CRC128_SIZE_MAX &&
	    new.nonce				<= CRC128_NONCE_MAX) {
		crc->crc128 = (struct bch_extent_crc128) {
			.type = 1 << BCH_EXTENT_ENTRY_crc128,
			common_fields(new),
			.nonce			= new.nonce,
			.csum			= new.csum,
		};
		return;
	}
#undef common_fields
	BUG();
}

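/*
 * Field widths drive the choice of on-disk crc entry above: crc32 entries only
 * have room for a 4 byte checksum and small size/nonce fields, crc64 for up to
 * a 10 byte checksum, and crc128 for a full 16 byte checksum. The first format
 * that fits is used, so cheaper checksum types cost less space per extent.
 */
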
void bch2_extent_crc_append(struct bkey_i_extent *e,
			    struct bch_extent_crc_unpacked new)
{
	bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
	__extent_entry_push(e);
}

static inline void __extent_entry_insert(struct bkey_i_extent *e,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));

	memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
			dst, (u64 *) end - (u64 *) dst);
	e->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
				    struct extent_ptr_decoded *p)
{
	struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(&e->k, NULL);
	union bch_extent_entry *pos;
	unsigned i;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = e->v.start;
		goto found;
	}

	extent_for_each_crc(extent_i_to_s(e), crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(e, p->crc);
	pos = extent_entry_last(extent_i_to_s(e));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(e, pos, to_entry(&p->ptr));

	for (i = 0; i < p->ec_nr; i++) {
		p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(e, pos, to_entry(&p->ec[i]));
	}
}

/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bkey_val_u64s(k.k))
		k.k->type = KEY_TYPE_deleted;

	return false;
}

void bch2_extent_mark_replicas_cached(struct bch_fs *c,
				      struct bkey_s_extent e,
				      unsigned target,
				      unsigned nr_desired_replicas)
{
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, e.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		extent_for_each_ptr_decode(e, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		extent_for_each_ptr_decode(e, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}

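/*
 * Example (assumed configuration): with durability-1 devices, three pointers
 * and nr_desired_replicas == 2, extra is 3 - 2 = 1, so exactly one pointer is
 * marked cached - preferring, when a target is given, a pointer outside the
 * target so that the copy on the target stays dirty.
 */
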
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_i *l, struct bkey_i *r)
{
	struct bkey_s_extent el = bkey_i_to_s_extent(l);
	struct bkey_s_extent er = bkey_i_to_s_extent(r);
	union bch_extent_entry *en_l, *en_r;

	if (bkey_val_u64s(&l->k) != bkey_val_u64s(&r->k))
		return BCH_MERGE_NOMERGE;

	extent_for_each_entry(el, en_l) {
		struct bch_extent_ptr *lp, *rp;
		struct bch_dev *ca;

		en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);

		if ((extent_entry_type(en_l) !=
		     extent_entry_type(en_r)) ||
		    !extent_entry_is_ptr(en_l))
			return BCH_MERGE_NOMERGE;

		lp = &en_l->ptr;
		rp = &en_r->ptr;

		if (lp->offset + el.k->size	!= rp->offset ||
		    lp->dev			!= rp->dev ||
		    lp->gen			!= rp->gen)
			return BCH_MERGE_NOMERGE;

		/* We don't allow extents to straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp->dev);

		if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
			return BCH_MERGE_NOMERGE;
	}

	l->k.needs_whiteout |= r->k.needs_whiteout;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
		bch2_key_resize(&l->k, KEY_SIZE_MAX);
		bch2_cut_front(l->k.p, r);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(&l->k, l->k.size + r->k.size);

	return BCH_MERGE_MERGE;
}

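/*
 * Example of a successful merge (hypothetical keys): l covers [0, 8) and r
 * covers [8, 16), each with a single pointer to the same device, equal gen,
 * bucket-contiguous offsets; the result is l resized to cover [0, 16). If the
 * combined size would exceed KEY_SIZE_MAX the merge is only partial: l grows
 * to the maximum size and r keeps the remainder.
 */
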
/*
 * When merging an extent that we're inserting into a btree node, the new merged
 * extent could overlap with an existing 0 size extent - if we don't fix that,
 * it'll break the btree node iterator so this code finds those 0 size extents
 * and shifts them out of the way.
 *
 * Also unpacks and repacks.
 */
static bool bch2_extent_merge_inline(struct bch_fs *c,
				     struct btree_iter *iter,
				     struct bkey_packed *l,
				     struct bkey_packed *r,
				     bool back_merge)
{
	struct btree *b = iter->l[0].b;
	struct btree_node_iter *node_iter = &iter->l[0].iter;
	BKEY_PADDED(k) li, ri;
	struct bkey_packed *m	= back_merge ? l : r;
	struct bkey_i *mi	= back_merge ? &li.k : &ri.k;
	struct bset_tree *t	= bch2_bkey_to_bset(b, m);
	enum merge_result ret;

	EBUG_ON(bkey_written(b, m));

	/*
	 * We need to save copies of both l and r, because we might get a
	 * partial merge (which modifies both) and then fails to repack
	 */
	bch2_bkey_unpack(b, &li.k, l);
	bch2_bkey_unpack(b, &ri.k, r);

	ret = bch2_bkey_merge(c, &li.k, &ri.k);
	if (ret == BCH_MERGE_NOMERGE)
		return false;

	/*
	 * check if we overlap with deleted extents - would break the sort
	 * order:
	 */
	if (back_merge) {
		struct bkey_packed *n = bkey_next(m);

		if (n != btree_bkey_last(b, t) &&
		    bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
		    bkey_deleted(n))
			return false;
	} else if (ret == BCH_MERGE_MERGE) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		if (prev &&
		    bkey_cmp_left_packed_byval(b, prev,
				bkey_start_pos(&li.k.k)) > 0)
			return false;
	}

	if (ret == BCH_MERGE_PARTIAL) {
		if (!extent_i_save(b, m, mi))
			return false;

		if (!back_merge)
			bkey_copy(packed_to_bkey(l), &li.k);
		else
			bkey_copy(packed_to_bkey(r), &ri.k);
	} else {
		if (!extent_i_save(b, m, &li.k))
			return false;
	}

	bch2_bset_fix_invalidated_key(b, m);
	bch2_btree_node_iter_fix(iter, b, node_iter,
				 m, m->u64s, m->u64s);
	verify_modified_extent(iter, m);

	return ret == BCH_MERGE_MERGE;
}

bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas)
{
	struct btree_iter iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;

	end.offset += size;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
			ret = false;
			break;
		}
	}
	bch2_btree_iter_unlock(&iter);

	return ret;
}

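/*
 * Note on intended use (based on the callers in this tree, e.g. the fs-io
 * write path): this answers "does [pos, pos + size) already have at least
 * nr_replicas allocated everywhere?", walking extents and reservations via
 * BTREE_ITER_SLOTS so holes show up as keys with zero allocated pointers and
 * fail the check immediately.
 */
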
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	switch (k.k->type) {
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		extent_for_each_ptr_decode(e, p, entry)
			ret += !p.ptr.cached &&
				p.crc.compression_type == BCH_COMPRESSION_NONE;
		break;
	}
	case KEY_TYPE_reservation:
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
		break;
	}

	return ret;
}

/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_i *l, struct bkey_i *r)
{
	struct bkey_i_reservation *li = bkey_i_to_reservation(l);
	struct bkey_i_reservation *ri = bkey_i_to_reservation(r);

	if (li->v.generation != ri->v.generation ||
	    li->v.nr_replicas != ri->v.nr_replicas)
		return BCH_MERGE_NOMERGE;

	l->k.needs_whiteout |= r->k.needs_whiteout;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
		bch2_key_resize(&l->k, KEY_SIZE_MAX);
		bch2_cut_front(l->k.p, r);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(&l->k, l->k.size + r->k.size);

	return BCH_MERGE_MERGE;
}