// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Code for managing the extent btree and dynamically updating the writeback

#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "disk_groups.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
		f = &failed->devs[failed->nr++];
	} else if (p->idx != f->idx) {

static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;

 * returns true if p1 is better than p2:
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
	if (likely(!p1.idx && !p2.idx)) {
		u64 l1 = dev_latency(c, p1.ptr.dev);
		u64 l2 = dev_latency(c, p2.ptr.dev);

		/* Pick at random, biased in favor of the faster device: */
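		/*
		 * (Illustrative numbers, not from the code: with l1 == 100 and
		 * l2 == 300, bch2_rand_range(400) > 100 holds with probability
		 * 300/400, so the lower-latency device p1 is picked ~75% of
		 * the time.)
		 */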
		return bch2_rand_range(l1 + l2) > l1;

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
 * This picks a non-stale pointer to read from, preferring devices that haven't
 * already seen failures recorded in @failed; @failed may be NULL, meaning any
 * readable pointer may be picked.
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;

	if (k.k->type == KEY_TYPE_error)

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		if (p.ptr.unwritten) {

		 * If there are any dirty pointers it's an error if we can't
		if (!ret && !p.ptr.cached)

		struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);

		if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
			p.idx = f->nr_failed < f->nr_retries

		if (!p.idx && p.has_ec && bch2_force_reconstruct_read)

		if (!p.idx && !bch2_dev_is_readable(ca))

		if (p.idx >= (unsigned) p.has_ec + 1)

		if (ret > 0 && !ptr_better(c, p, *pick))

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags,
			   struct printbuf *err)
	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
			 btree_ptr_val_too_big,
			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
	bch2_bkey_ptrs_to_text(out, c, k);

int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags,
			      struct printbuf *err)
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
			 c, err, btree_ptr_v2_val_too_big,
			 "value too big (%zu > %zu)",
			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

	bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
			 c, err, btree_ptr_v2_min_key_bad,

	if (flags & BCH_VALIDATE_write)
		bkey_fsck_err_on(!bp.v->sectors_written,
				 c, err, btree_ptr_v2_written_0,
				 "sectors_written == 0");

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);

/* KEY_TYPE_extent: */
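
/*
 * Merge two adjacent extents, if possible: the pointers must refer to
 * contiguous data on the same devices (without straddling a bucket), erasure
 * coding info must match, and any checksum entries must be mergeable without
 * overflowing the size limit of their on-disk format.
 */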
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;

	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)

	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev != rp.ptr.dev ||
		    lp.ptr.gen != rp.ptr.gen ||
		    lp.ptr.unwritten != rp.ptr.unwritten ||
		    lp.has_ec != rp.has_ec)

		/* Extents may not straddle buckets: */
		struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
		bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);

		if (lp.has_ec != rp.has_ec ||
		    (lp.ec.block != rp.ec.block ||
		     lp.ec.redundancy != rp.ec.redundancy ||
		     lp.ec.idx != rp.ec.idx)))

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	use_right_ptr = false;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_r.uncompressed_size << 9);
				crc_l.uncompressed_size += crc_r.uncompressed_size;
				crc_l.compressed_size += crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	bch2_key_resize(l.k, l.k->size + r.k->size);
/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bch_validate_flags flags,
			     struct printbuf *err)
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
			 reservation_key_nr_replicas_invalid,
			 "invalid nr_replicas (%u)", r.v->nr_replicas);

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)

	bch2_key_resize(l.k, l.k->size + r.k->size);

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
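
/*
 * "Narrowing" a checksum entry means replacing a checksum covering the whole
 * (partially overwritten) extent with one covering only the live portion, so
 * readers no longer have to bounce and checksum data that isn't live anymore
 * (illustrative example: a 128k extent of which only 4k is still live).
 */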
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
	return !crc_is_compressed(u) &&
	       u.uncompressed_size > u.live_size &&
	       bch2_csum_type_is_encryption(u.csum_type) ==
	       bch2_csum_type_is_encryption(n.csum_type);

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))

 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;

	/* Find a checksum entry that covers only live data: */
	bkey_for_each_crc(&k->k, ptrs, u, i)
		if (!crc_is_compressed(u) &&
		    u.live_size == u.uncompressed_size) {

	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			bch2_extent_ptr_decoded_append(k, &p);
			goto restart_narrow_pointers;

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
#define set_common_fields(_dst, _src)				\
	_dst.type = 1 << type;					\
	_dst.csum_type = _src.csum_type,			\
	_dst.compression_type = _src.compression_type,		\
	_dst._compressed_size = _src.compressed_size - 1,	\
	_dst._uncompressed_size = _src.uncompressed_size - 1,	\
	_dst.offset = _src.offset
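
	/*
	 * Note: the on-disk size fields store the size minus one, which is why
	 * each entry type can describe sizes up to its *_SIZE_MAX (see the
	 * checks in bch2_extent_crc_append() below).
	 */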
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce = src.nonce;
		dst->crc64.csum_lo = (u64 __force) src.csum.lo;
		dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce = src.nonce;
		dst->crc128.csum = src.csum;

#undef set_common_fields
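
/*
 * Append a new checksum entry to @k, using the smallest on-disk crc entry
 * type (crc32/crc64/crc128) that can hold the checksum, size and nonce.
 */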
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);

/* Generic code for keys with pointers: */
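
/*
 * Pointer counting helpers: bch2_bkey_nr_ptrs() counts all pointers,
 * bch2_bkey_nr_ptrs_allocated() counts dirty pointers (or reserved replicas
 * for a reservation), and bch2_bkey_nr_ptrs_fully_allocated() counts dirty,
 * uncompressed pointers.
 */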
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
	return bch2_bkey_devs(k).nr;

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			replicas += p.ec.redundancy;
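
/*
 * Durability of a single pointer: erasure coded pointers count for the stripe
 * redundancy plus one; otherwise it's the device's configured durability, and
 * pointers to failed devices count for zero (see bch2_extent_ptr_durability()).
 */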
static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
		? p->ec.redundancy + 1

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	return ca ? __extent_ptr_durability(ca, p) : 0;

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
	struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);

	if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)

	return __extent_ptr_durability(ca, p);

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
	union bch_extent_entry *i = ptrs.start;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
 * Returns pointer to the next entry after the one being dropped:
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
						   struct bch_extent_ptr *ptr)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
		} else if (extent_entry_is_ptr(next)) {

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		bch2_bkey_drop_ptr_noerror(k, ptr);

	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
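
/*
 * The _noerror variant drops the pointer via bch2_bkey_drop_ptr_noerror(), so
 * the key is not converted to KEY_TYPE_error when its last dirty pointer goes
 * away.
 */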
void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

		bch2_bkey_drop_ptr_noerror(k, ptr);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (ca = bch2_dev_rcu(c, ptr->dev)) &&
		     !dev_ptr_stale_rcu(ca, ptr))) {

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)

 * Returns true if two extents refer to the same data:
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
	if (k1.k->type != k2.k->type)

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev == p2.ptr.dev &&
				    p1.ptr.gen == p2.ptr.gen &&
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))

		/* KEY_TYPE_deleted, etc. */

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev == p2.ptr.dev &&
		    p1.ptr.gen == p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
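
/*
 * Mark @ptr cached; a cached pointer can't also be erasure coded, so any
 * stripe pointer attached to it is dropped as well.
 */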
void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
				extent_entry_drop(k, ec);

		if (extent_entry_is_stripe_ptr(entry))
		else if (extent_entry_is_ptr(entry))
 * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
 * Returns true if @k should be dropped entirely
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
	bch2_bkey_drop_ptrs(k, ptr,
		(ca = bch2_dev_rcu(c, ptr->dev)) &&
		dev_ptr_stale_rcu(ca, ptr) > 0);

	return bkey_deleted(k.k);

void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
	struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);

		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
			   (u64) ptr->offset, ptr->gen,
			   ptr->cached ? " cached" : "");

		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

		prt_printf(out, "ptr: %u:%llu:%u gen %u",
			   ptr->dev, b, offset, ptr->gen);
			prt_str(out, " cached");
			prt_str(out, " unwritten");

		int stale = dev_ptr_stale_rcu(ca, ptr);
			prt_printf(out, " stale");
			prt_printf(out, " invalid");

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128: {
			struct bch_extent_crc_unpacked crc =
				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
				   crc.compressed_size,
				   crc.uncompressed_size,
				   crc.offset, crc.nonce);
			bch2_prt_csum_type(out, crc.csum_type);
			prt_str(out, " compress ");
			bch2_prt_compression_type(out, crc.compression_type);
		case BCH_EXTENT_ENTRY_stripe_ptr: {
			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			prt_str(out, "rebalance: target ");
				bch2_target_to_text(out, c, r->target);
				prt_printf(out, "%u", r->target);
			prt_str(out, " compression ");
			bch2_compression_opt_to_text(out, r->compression);
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
static int extent_ptr_invalid(struct bch_fs *c,
			      enum bch_validate_flags flags,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      struct printbuf *err)
	struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);

	u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
	unsigned first_bucket = ca->mi.first_bucket;
	u64 nbuckets = ca->mi.nbuckets;
	unsigned bucket_size = ca->mi.bucket_size;

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
				 ptr_to_duplicate_device,
				 "multiple pointers to same device (%u)", ptr->dev);

	bkey_fsck_err_on(bucket >= nbuckets, c, err,
			 ptr_after_last_bucket,
			 "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
	bkey_fsck_err_on(bucket < first_bucket, c, err,
			 ptr_before_first_bucket,
			 "pointer before first bucket (%llu < %u)", bucket, first_bucket);
	bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size, c, err,
			 ptr_spans_multiple_buckets,
			 "pointer spans multiple buckets (%u + %u > %u)",
			 bucket_offset, size_ondisk, bucket_size);
int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags,
			   struct printbuf *err)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
				 extent_ptrs_invalid_entry,
				 "invalid extent entry type (got %u, max %u)",
				 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);

		bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
				 !extent_entry_is_ptr(entry), c, err,
				 btree_ptr_has_non_ptr,
				 "has non ptr field");

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
						 size_ondisk, false, err);

			bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
					 ptr_cached_and_erasure_coded,
					 "cached, erasure coded ptr");

			if (!entry->ptr.unwritten)
				have_written = true;
				have_unwritten = true;

			crc_since_last_ptr = false;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
					 ptr_crc_uncompressed_size_too_small,
					 "checksum offset + key size > uncompressed size");
			bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
					 ptr_crc_csum_type_unknown,
					 "invalid checksum type");
			bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
					 ptr_crc_compression_type_unknown,
					 "invalid compression type");

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,

			bkey_fsck_err_on(crc_since_last_ptr, c, err,
					 "redundant crc entry");
			crc_since_last_ptr = true;

			bkey_fsck_err_on(crc_is_encoded(crc) &&
					 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
					 (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), c, err,
					 ptr_crc_uncompressed_size_too_big,
					 "too large encoded extent");

			size_ondisk = crc.compressed_size;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			bkey_fsck_err_on(have_ec, c, err,
					 ptr_stripe_redundant,
					 "redundant stripe entry");
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			if (!bch2_compression_opt_valid(r->compression)) {
				struct bch_compression_opt opt = __bch2_compression_decode(r->compression);

				prt_printf(err, "invalid compression opt %u:%u",
					   opt.type, opt.level);
				return -BCH_ERR_invalid_bkey;

	bkey_fsck_err_on(!nr_ptrs, c, err,
			 extent_ptrs_no_ptrs,
	bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
			 extent_ptrs_too_many_ptrs,
			 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
	bkey_fsck_err_on(have_written && have_unwritten, c, err,
			 extent_ptrs_written_and_unwritten,
			 "extent with unwritten and written ptrs");
	bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
			 extent_ptrs_unwritten,
			 "has unwritten ptrs");
	bkey_fsck_err_on(crc_since_last_ptr, c, err,
			 extent_ptrs_redundant_crc,
			 "redundant crc entry");
	bkey_fsck_err_on(have_ec, c, err,
			 extent_ptrs_redundant_stripe,
			 "redundant stripe entry");
void bch2_ptr_swab(struct bkey_s k)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;

	for (d = (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;

	for (entry = ptrs.start;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
		case BCH_EXTENT_ENTRY_stripe_ptr:
		case BCH_EXTENT_ENTRY_rebalance:

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;
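
/*
 * Returns a bitmask of the pointers that would have to be rewritten to match
 * the given target/compression options - e.g. (illustrative) if only the
 * second of three pointers is on the wrong target, the result is 0b010.
 */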
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned rewrite_ptrs = 0;

		unsigned compression_type = bch2_compression_opt_to_type(compression);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
				goto incompressible;

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				rewrite_ptrs |= 1U << i;

	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
				rewrite_ptrs |= 1U << i;

	return rewrite_ptrs;

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	 * If it's an indirect extent, we don't delete the rebalance entry when
	 * done so that we know what options were applied - check if it still
	    k.k->type == KEY_TYPE_reflink_v &&
	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
				  struct bch_io_opts *opts)
	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *r;
	unsigned target = opts->background_target;
	unsigned compression = background_compression(*opts);
	bool needs_rebalance;

	if (!bkey_extent_is_direct_data(k.k))

	/* get existing rebalance entry: */
	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
		if (k.k->type == KEY_TYPE_reflink_v) {
			 * indirect extents: existing options take precedence,
			 * so that we don't move extents back and forth if
			 * they're referenced by different inodes with different
				compression = r->compression;

		r->compression = compression;

	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

	if (needs_rebalance && !r) {
		union bch_extent_entry *new = bkey_val_end(k);

		new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance;
		new->rebalance.compression = compression;
		new->rebalance.target = target;
		new->rebalance.unused = 0;
		k.k->u64s += extent_entry_u64s(new);
	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
		 * For indirect extents, don't delete the rebalance entry when
		 * we're finished so that we know we specifically moved it or
		 * compressed it to its current location/compression type
		extent_entry_drop(k, (union bch_extent_entry *) r);

/* Generic extent code: */
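
/*
 * Trim the front of @k to @where: pointer and crc offsets are adjusted (or
 * inline data memmove'd) and the value is shrunk; returns the change in the
 * size of the value, in u64s (zero or negative).
 */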
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
	unsigned new_val_u64s = bkey_val_u64s(k.k);

	if (bkey_le(where, bkey_start_pos(k.k)))

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

		k.k->type = KEY_TYPE_deleted;

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				entry->ptr.offset += sub;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
			case BCH_EXTENT_ENTRY_stripe_ptr:
			case BCH_EXTENT_ENTRY_rebalance:

			if (extent_entry_is_crc(entry))

	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
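
/*
 * Trim the back of @k to @where: unlike cutting the front, pointer offsets
 * don't need adjusting; only inline data values shrink. Returns the change in
 * the size of the value, in u64s (zero or negative).
 */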
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
	unsigned new_val_u64s = bkey_val_u64s(k.k);

	if (bkey_ge(where, k.k->p))

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;

		k.k->type = KEY_TYPE_deleted;

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;