// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_iter.h"
#include "disk_groups.h"
#include "extents.h"
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}
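
/*
 * Record a failed read from @p, so that later calls to
 * bch2_bkey_pick_read_device() will prefer other replicas:
 */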
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */
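
		/*
		 * i.e. p1 is chosen with probability roughly l2 / (l1 + l2):
		 * with l1 = 1ms and l2 = 3ms, the faster device wins about
		 * 75% of the time.
		 */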
		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
/*
 * Pick a non-stale pointer to read from, preferring devices that haven't seen
 * recent failures (per @failed, which may be NULL).  Returns 1 and sets @pick
 * if a pointer was chosen, 0 if there was nothing usable to pick from, or
 * -EIO if the key is an error key or has dirty pointers that can't be read.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
/* KEY_TYPE_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;

	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bkey_for_each_ptr(ptrs, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		mark = ptr_bucket_mark(ca, ptr);

		err = "stale";
		if (gen_after(mark.gen, ptr->gen))
			goto err;

		err = "inconsistent";
		if (mark.data_type != BCH_DATA_btree ||
		    mark.dirty_sectors < c->opts.btree_node_size)
			goto err;
	}

	percpu_up_read(&c->mark_lock);
	return;
err:
	bch2_fs_inconsistent(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
		err, (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
		PTR_BUCKET_NR(ca, ptr),
		mark.gen, (unsigned) mark.v.counter);
	percpu_up_read(&c->mark_lock);
}
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	pr_buf(out, "seq %llx written %u min_key ",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written));

	bch2_bpos_to_text(out, bp.v->min_key);
	pr_buf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}
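
/*
 * Before the inode_btree_change on-disk version, min_key in extent btree
 * nodes was effectively an exclusive bound, so it is shifted by one position
 * when reading old nodes or writing nodes in the old format:
 */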
void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bkey_predecessor(bp.v->min_key)
			: bkey_successor(bp.v->min_key);
}
/* KEY_TYPE_extent: */

const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	char buf[160];

	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) ||
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	extent_for_each_ptr_decode(e, p, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
		unsigned stale = gen_after(mark.gen, p.ptr.gen);
		unsigned disk_sectors = ptr_disk_sectors(p);
		unsigned mark_sectors = p.ptr.cached
			? mark.cached_sectors
			: mark.dirty_sectors;

		bch2_fs_inconsistent_on(stale && !p.ptr.cached, c,
			"stale dirty pointer (ptr gen %u bucket %u)",
			p.ptr.gen, mark.gen);

		bch2_fs_inconsistent_on(stale > 96, c,
			"key too stale: %i", stale);

		bch2_fs_inconsistent_on(!stale &&
			(mark.data_type != BCH_DATA_user ||
			 mark_sectors < disk_sectors), c,
			"extent pointer not marked: %s:\n"
			"type %u sectors %u < %u",
			(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
			mark.data_type,
			mark_sectors, disk_sectors);
	}

	percpu_up_read(&c->mark_lock);
}
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
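
/*
 * Try to merge two adjacent extents: this only succeeds if l and r have the
 * same entry layout, every pointer pair is physically contiguous (same device
 * and bucket, r starting where l ends), and the checksums can be combined
 * without reading the data (mergeable checksum type, not compressed, and the
 * combined sizes still fit in the crc entry's fields).
 */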
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_extent l = bkey_s_to_extent(_l);
	struct bkey_s_extent r = bkey_s_to_extent(_r);
	union bch_extent_entry *en_l = l.v->start;
	union bch_extent_entry *en_r = r.v->start;
	struct bch_extent_crc_unpacked crc_l, crc_r;

	if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
		return BCH_MERGE_NOMERGE;

	crc_l = bch2_extent_crc_unpack(l.k, NULL);

	extent_for_each_entry(l, en_l) {
		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return BCH_MERGE_NOMERGE;

		switch (extent_entry_type(en_l)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *lp = &en_l->ptr;
			const struct bch_extent_ptr *rp = &en_r->ptr;
			struct bch_dev *ca;

			if (lp->offset + crc_l.compressed_size != rp->offset ||
			    lp->dev != rp->dev ||
			    lp->gen != rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
			    en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
				return BCH_MERGE_NOMERGE;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.csum_type != crc_r.csum_type ||
			    crc_l.compression_type != crc_r.compression_type ||
			    crc_l.nonce != crc_r.nonce)
				return BCH_MERGE_NOMERGE;

			if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
			    crc_r.offset)
				return BCH_MERGE_NOMERGE;

			if (!bch2_checksum_mergeable(crc_l.csum_type))
				return BCH_MERGE_NOMERGE;

			if (crc_is_compressed(crc_l))
				return BCH_MERGE_NOMERGE;

			if (crc_l.csum_type &&
			    crc_l.uncompressed_size +
			    crc_r.uncompressed_size > c->sb.encoded_extent_max)
				return BCH_MERGE_NOMERGE;

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return BCH_MERGE_NOMERGE;

			break;
		default:
			return BCH_MERGE_NOMERGE;
		}
	}

	extent_for_each_entry(l, en_l) {
		struct bch_extent_crc_unpacked crc_l, crc_r;

		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (!extent_entry_is_crc(en_l))
			continue;

		crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
		crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

		crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
						 crc_l.csum,
						 crc_r.csum,
						 crc_r.uncompressed_size << 9);

		crc_l.uncompressed_size += crc_r.uncompressed_size;
		crc_l.compressed_size += crc_r.compressed_size;

		bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
				     extent_entry_type(en_l));
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}
enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_reservation r = bkey_s_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return BCH_MERGE_NOMERGE;
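
	/*
	 * A bkey's size field can only represent up to KEY_SIZE_MAX sectors:
	 * if the merge would overflow that, grow l as far as it will go, trim
	 * the merged part off the front of r, and report a partial merge:
	 */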
	if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
		bch2_key_resize(l.k, KEY_SIZE_MAX);
		bch2_cut_front_s(l.k->p, r.s);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data
 * in memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.live_size) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)				\
	_dst.type		= 1 << type;			\
	_dst.csum_type		= _src.csum_type,		\
	_dst.compression_type	= _src.compression_type,	\
	_dst._compressed_size	= _src.compressed_size - 1,	\
	_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
	_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum = *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce = src.nonce;
		dst->crc64.csum_lo = src.csum.lo;
		dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce = src.nonce;
		dst->crc128.csum = src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
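
/*
 * Append a new crc entry to @k, using the smallest on-disk encoding (crc32,
 * crc64 or crc128) that can hold the checksum, size and nonce:
 */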
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}
bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas, bool compressed)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}
	bch2_trans_exit(&trans);

	return ret;
}
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}
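
/*
 * The durability a single pointer contributes: the device's configured
 * durability (0 if the device has failed), plus the redundancy of the erasure
 * coded stripe it belongs to, if any.  Cached pointers contribute nothing.
 */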
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec)
		durability += p.ec.redundancy;

	return durability;
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
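
/*
 * If a key has more durability than required, mark extra replicas as cached
 * so they can be dropped: first demote pointers outside of @target, then, if
 * the key is still over-replicated, any remaining pointer.
 */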
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
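
/*
 * Append a decoded pointer to @k, reusing an existing crc entry if one
 * matches p->crc and appending a new one otherwise; the stripe pointer, if
 * any, is inserted alongside it.
 */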
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}
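
/*
 * Drop a pointer entry, together with any stripe pointers attached to it and
 * the preceding crc entry if no other pointer still references it.  Returns
 * the position the entries were removed from.
 */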
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
			    ptr->cached &&
			    ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_discard;

	return bkey_whiteout(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
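
/*
 * Validate the pointer/crc entries of a key: entry types, device and offset
 * ranges, checksum and compression types, and that encrypted extents use a
 * consistent nonce across all their crc entries.
 */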
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_devs_list devs;
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;
	unsigned i;

	if (k.k->type == KEY_TYPE_btree_ptr ||
	    k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = c->opts.btree_node_size;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	devs = bch2_bkey_devs(k);
	bubble_sort(devs.devs, devs.nr, u8_cmp);
	for (i = 0; i + 1 < devs.nr; i++)
		if (devs.devs[i] == devs.devs[i + 1])
			return "multiple ptrs to same device";

	return NULL;
}
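
/*
 * Byte-swap a key's pointer entries: the value is swabbed 64 bits at a time,
 * then the checksum fields that aren't a full 64 bits are fixed up
 * individually.
 */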
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d = (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}
/* Generic extent code: */
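
/*
 * Cut the front off an extent, advancing its start to @where: pointer and crc
 * offsets are adjusted so the remaining data is still addressed correctly.
 * Returns the change in the value's size, in u64s (always <= 0).
 */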
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p = where;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}