// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "replicas.h"
#include "util.h"
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};
static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
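
/*
 * IO failure tracking for the read path: bch2_mark_io_failure() records a
 * failed read from a particular pointer so that bch2_bkey_pick_read_device()
 * can deprioritize that device (or that replica index) on the next retry.
 */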
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (force_reconstruct_read(c))
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
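/*
 * Returns 1 if a pointer was picked, 0 if there is nothing to read from, or
 * -EIO if the key is an error key or has dirty pointers we cannot read.
 */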
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (force_reconstruct_read(c) &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
/* KEY_TYPE_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}
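
/*
 * Runtime consistency check: verify that this btree pointer's replicas are
 * marked in the superblock and that its buckets are marked as btree data with
 * enough dirty sectors. Skipped until initial GC has run, or if taking
 * mark_lock would block.
 */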
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;

	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		!bch2_bkey_replicas_marked(c, k, false), c,
		"btree key bad (replicas not marked in superblock):\n%s",
		(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));

	bkey_for_each_ptr(ptrs, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		mark = ptr_bucket_mark(ca, ptr);

		err = "stale";
		if (gen_after(mark.gen, ptr->gen))
			goto err;

		err = "inconsistent";
		if (mark.data_type != BCH_DATA_BTREE ||
		    mark.dirty_sectors < c->opts.btree_node_size)
			goto err;
	}
out:
	percpu_up_read(&c->mark_lock);
	return;
err:
	bch2_fs_inconsistent(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
		err, (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
		PTR_BUCKET_NR(ca, ptr),
		mark.gen, (unsigned) mark.v.counter);
	goto out;
}
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	char buf[160];

	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) ||
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
		"extent key bad (replicas not marked in superblock):\n%s",
		(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));

	extent_for_each_ptr_decode(e, p, entry) {
		struct bch_dev *ca	= bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
		unsigned stale		= gen_after(mark.gen, p.ptr.gen);
		unsigned disk_sectors	= ptr_disk_sectors(p);
		unsigned mark_sectors	= p.ptr.cached
			? mark.cached_sectors
			: mark.dirty_sectors;

		bch2_fs_inconsistent_on(stale && !p.ptr.cached, c,
			"stale dirty pointer (ptr gen %u bucket %u)",
			p.ptr.gen, mark.gen);

		bch2_fs_inconsistent_on(stale > 96, c,
			"key too stale: %i", stale);

		bch2_fs_inconsistent_on(!stale &&
			(mark.data_type != BCH_DATA_USER ||
			 mark_sectors < disk_sectors), c,
			"extent pointer not marked: %s:\n"
			"type %u sectors %u < %u",
			(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
			mark.data_type,
			mark_sectors, disk_sectors);
	}

	percpu_up_read(&c->mark_lock);
}
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
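
/*
 * Extents may merge only if their values are the same size, their entries
 * match type-for-type, pointers are contiguous within the same device and
 * bucket, and checksums use a mergeable, uncompressed checksum type that
 * stays within the crc field and encoded_extent_max limits. On success the
 * left key absorbs the right key.
 */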
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_extent l = bkey_s_to_extent(_l);
	struct bkey_s_extent r = bkey_s_to_extent(_r);
	union bch_extent_entry *en_l = l.v->start;
	union bch_extent_entry *en_r = r.v->start;
	struct bch_extent_crc_unpacked crc_l, crc_r;

	if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
		return BCH_MERGE_NOMERGE;

	crc_l = bch2_extent_crc_unpack(l.k, NULL);

	extent_for_each_entry(l, en_l) {
		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return BCH_MERGE_NOMERGE;

		switch (extent_entry_type(en_l)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *lp = &en_l->ptr;
			const struct bch_extent_ptr *rp = &en_r->ptr;
			struct bch_dev *ca;

			if (lp->offset + crc_l.compressed_size != rp->offset ||
			    lp->dev			!= rp->dev ||
			    lp->gen			!= rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (en_l->stripe_ptr.block	!= en_r->stripe_ptr.block ||
			    en_l->stripe_ptr.idx	!= en_r->stripe_ptr.idx)
				return BCH_MERGE_NOMERGE;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.csum_type != crc_r.csum_type ||
			    crc_l.compression_type != crc_r.compression_type ||
			    crc_l.nonce != crc_r.nonce)
				return BCH_MERGE_NOMERGE;

			if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
			    crc_r.offset)
				return BCH_MERGE_NOMERGE;

			if (!bch2_checksum_mergeable(crc_l.csum_type))
				return BCH_MERGE_NOMERGE;

			if (crc_is_compressed(crc_l))
				return BCH_MERGE_NOMERGE;

			if (crc_l.csum_type &&
			    crc_l.uncompressed_size +
			    crc_r.uncompressed_size > c->sb.encoded_extent_max)
				return BCH_MERGE_NOMERGE;

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return BCH_MERGE_NOMERGE;

			break;
		default:
			return BCH_MERGE_NOMERGE;
		}
	}

	extent_for_each_entry(l, en_l) {
		struct bch_extent_crc_unpacked crc_l, crc_r;

		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (!extent_entry_is_crc(en_l))
			continue;

		crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
		crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

		crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
						 crc_l.csum,
						 crc_r.csum,
						 crc_r.uncompressed_size << 9);

		crc_l.uncompressed_size	+= crc_r.uncompressed_size;
		crc_l.compressed_size	+= crc_r.compressed_size;

		bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
				     extent_entry_type(en_l));
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}
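
/*
 * Reservations merge when generation and nr_replicas match; if the combined
 * size would exceed KEY_SIZE_MAX, the left key grows to the maximum and the
 * remainder stays in the front-trimmed right key (BCH_MERGE_PARTIAL).
 */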
enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_reservation r = bkey_s_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return BCH_MERGE_NOMERGE;

	if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
		bch2_key_resize(l.k, KEY_SIZE_MAX);
		bch2_cut_front_s(l.k->p, r.s);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.live_size) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    !u.offset &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
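
/*
 * Append a checksum entry to @k, using the smallest on-disk crc variant
 * (crc32/crc64/crc128) that can represent the checksum bytes, extent size and
 * nonce:
 */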
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		 new.uncompressed_size		<= CRC64_SIZE_MAX &&
		 new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		 new.uncompressed_size		<= CRC128_SIZE_MAX &&
		 new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}
bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}
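
/*
 * Returns true if every existing extent or reservation in [pos, pos + size)
 * already has at least @nr_replicas fully allocated (non-cached, uncompressed)
 * pointers:
 */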
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
			ret = false;
			break;
		}
	}
	bch2_trans_exit(&trans);

	return ret;
}
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec) {
		struct stripe *s =
			genradix_ptr(&c->stripes[0], p.ec.idx);

		if (WARN_ON(!s))
			goto out;

		durability = max_t(unsigned, durability, s->nr_redundant);
	}
out:
	return durability;
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
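
/*
 * Reduce the durability of @k to @nr_desired_replicas by flagging extra
 * pointers as cached, preferring to demote pointers outside of @target first:
 */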
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->u64s++;
		break;
	default:
		BUG();
	}
}
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
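
/*
 * Re-pack a decoded pointer into @k: reuse an existing checksum entry that
 * matches p->crc (or append a new one), then insert the pointer (and its
 * stripe pointer, if any) at that position:
 */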
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}
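
/*
 * Drop a pointer entry from @k; if the checksum entry that applies to it
 * would no longer cover any remaining pointer, drop that as well:
 */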
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
			    ptr->cached &&
			    ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_discard;

	return bkey_whiteout(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}
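
/*
 * Per-pointer validation: checks that the pointer references a real device,
 * that no other pointer in the key references the same device, and that its
 * offset and on-disk size land inside a single valid bucket:
 */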
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;

	if (k.k->type == KEY_TYPE_btree_ptr)
		size_ondisk = c->opts.btree_node_size;
	if (k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	return NULL;
}
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}

/* Generic extent code: */
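
/*
 * bch2_cut_front_s()/bch2_cut_back_s() trim an extent-style key to start or
 * end at @where, adjusting pointer/crc offsets or inline data as needed. The
 * return value is minus the change in the value's size, in u64s.
 */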
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data: {
		struct bkey_s_inline_data d = bkey_s_to_inline_data(k);

		sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));

		memmove(d.v->data,
			d.v->data + sub,
			bkey_val_bytes(d.k) - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p = where;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
		new_val_u64s = min(new_val_u64s, k.k->size << 6);
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}