// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "extents.h"
#include "super.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
/*
 * Returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */
		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
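
/*
 * Rough worked example of the biased pick above, assuming dev1 has a measured
 * read latency of l1 = 1ms and dev2 has l2 = 3ms: bch2_rand_range(l1 + l2)
 * draws uniformly from [0, 4ms), and p1 wins whenever the draw exceeds l1, so
 * the faster dev1 is picked with probability l2 / (l1 + l2) = 75%. Each
 * device is chosen in proportion to the *other* device's latency.
 */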
/*
 * This picks a non-stale pointer, preferably from a device other than those
 * that have already failed (@failed may be NULL, meaning pick any). If there
 * are no non-stale pointers to other devices, it will still pick one from a
 * failed device.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
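
/*
 * A brief sketch of the p.idx convention used above, as best it can be read
 * from this code: idx 0 means "read the replica directly", idx 1 means "read
 * via erasure-coded reconstruction" (only meaningful when p.has_ec), and an
 * idx of has_ec + 1 or more means the pointer has run out of options and is
 * skipped. Prior failures recorded in @failed bump idx so that retries move
 * on to the next strategy.
 */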
/* KEY_TYPE_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

const char *bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	if (bkey_val_bytes(k.k) <= sizeof(*bp.v))
		return "value too small";

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	if (c->sb.version < bcachefs_metadata_version_snapshot &&
	    bp.v->min_key.snapshot)
		return "invalid min_key.snapshot";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	pr_buf(out, "seq %llx written %u min_key ",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written));

	bch2_bpos_to_text(out, bp.v->min_key);
	pr_buf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}
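
/*
 * A rough reading of the compat rule above: extents btree nodes written by
 * versions before bcachefs_metadata_version_inode_btree_change stored min_key
 * off by one position, so when writing in the old format we store
 * bpos_nosnap_predecessor(min_key), and when reading we map it back with
 * bpos_nosnap_successor(). POS_MIN is excluded since it has no predecessor.
 */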
/* KEY_TYPE_extent: */

const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}

void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;
	struct bch_dev *ca;

	/* Both keys must have the same sequence of entry types: */
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev != rp.ptr.dev ||
		    lp.ptr.gen != rp.ptr.gen ||
		    lp.has_ec  != rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec != rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block	!= rp.ec.block ||
		      lp.ec.redundancy	!= rp.ec.redundancy ||
		      lp.ec.idx		!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > c->sb.encoded_extent_max)
				return false;

			if (lp.crc.uncompressed_size + rp.crc.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
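
/*
 * Sketch of when the above succeeds, using made-up numbers: two adjacent
 * extents l = [0..8) and r = [8..16) whose pointers land back to back in the
 * same bucket on the same device (l's data at sectors 100..108, r's at
 * 108..116), with matching generations, no compression, and a mergeable
 * checksum type (e.g. crc32c) can be merged into a single [0..16) extent; the
 * left key's checksum entry is extended with bch2_checksum_merge() and the
 * key is resized. Any mismatch in device, gen, stripe, nonce or compression
 * makes the merge bail out early.
 */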
/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data
 * in memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
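
/*
 * Concrete sketch of "narrowing", with invented numbers: a replica whose crc
 * entry covers 128 sectors (uncompressed_size 128) but has offset 32 and
 * live_size 64 forces readers to checksum the whole 128-sector region and
 * then bounce away the dead parts. Given a freshly computed checksum n that
 * covers exactly the 64 live sectors, the loop above drops that pointer,
 * bumps ptr.offset by the old crc offset (32) so it points straight at the
 * live data, and re-appends it with the narrower crc entry.
 */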
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
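
/*
 * Rough illustration of the size-class selection above: a 4-byte checksum
 * (e.g. crc32c) over a small enough extent with a small enough nonce packs
 * into the compact crc32 entry; a wider checksum (e.g. crc64/xxhash), a
 * bigger extent, or a bigger nonce falls through to crc64; and only the
 * crc128 entry can hold a full 16-byte authentication tag. Packing into the
 * smallest entry that fits keeps the bkey value small on disk.
 */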
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas, bool compressed)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_extents, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}
	bch2_trans_iter_put(&trans, iter);

	bch2_trans_exit(&trans);

	return ret;
}
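
/*
 * In other words: scanning [pos, pos + size) in the extents btree, the range
 * only counts as "allocated" if every key in it already has at least
 * nr_replicas replicas and, when @compressed is false, none of its data is
 * compressed. A caller wanting to overwrite 8 sectors in place with 2
 * replicas, for example, would pass size = 8, nr_replicas = 2 and get false
 * back as soon as one under-replicated or compressed extent is found.
 */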
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}

static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_failed)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec)
		durability += p.ec.redundancy;

	return durability;
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
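
/*
 * Example of the two passes above, with made-up numbers: a key with
 * durability 3 (three single-durability pointers) and nr_desired_replicas = 2
 * has extra = 1 to shed. The first pass prefers to demote a pointer outside
 * the foreground @target, so reads keep hitting the target devices; only if
 * extra durability remains does the second pass demote any pointer. Demoted
 * pointers are merely marked cached, so the data stays readable until those
 * buckets are reused.
 */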
void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->u64s++;
		break;
	default:
		BUG();
	}
}

static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);

	return i;
}
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/*
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely.
 *
 * For existing keys, only called when btree nodes are being rewritten, not
 * when they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_deleted;

	return bkey_deleted(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}
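
/*
 * For reference, a plausible line of output from the above for a checksummed,
 * replicated extent (field values invented): "crc: c_size 8 size 8 offset 0
 * nonce 0 csum 1 compress 0 ptr: 0:3456 gen 7 ptr: 1:9824 gen 2 cached" --
 * one space-separated clause per extent entry, in the order the entries
 * appear in the key.
 */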
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_devs_list devs;
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;
	unsigned i;

	if (k.k->type == KEY_TYPE_btree_ptr ||
	    k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = c->opts.btree_node_size;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	devs = bch2_bkey_devs(k);
	bubble_sort(devs.devs, devs.nr, u8_cmp);
	for (i = 0; i + 1 < devs.nr; i++)
		if (devs.devs[i] == devs.devs[i + 1])
			return "multiple ptrs to same device";

	return NULL;
}
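
/*
 * The nonce check above enforces one property worth spelling out: for
 * encrypted extents, every checksum entry in the key must agree on the
 * extent's starting nonce, i.e. crc.offset + crc.nonce must be the same for
 * all of them -- presumably because every replica of the data was encrypted
 * with the same nonce sequence.
 */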
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}
/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
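
/*
 * Quick illustration with invented numbers: cutting the front of an extent
 * [16..32) at where = 20 computes sub = 4, shrinks the key to [20..32), and
 * then either bumps ptr.offset by 4 sectors (for pointers before any checksum
 * entry) or bumps the crc offset by 4, so the checksum still describes the
 * original 16-sector region while only the tail is considered live. Inline
 * data is instead shifted down by sub sectors' worth of bytes (sub << 9).
 */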
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}