#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
-#include "btree_update.h"
-#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
-#include "dirent.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"
-#include "xattr.h"
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
-{
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs++;
-
- return nr_ptrs;
-}
-
-unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
-{
- unsigned nr_ptrs = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent: {
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs += !ptr->cached;
- BUG_ON(!nr_ptrs);
- break;
- }
- case KEY_TYPE_reservation:
- nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
- break;
- }
-
- return nr_ptrs;
-}
-
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
- struct extent_ptr_decoded p)
-{
- unsigned i, durability = 0;
- struct bch_dev *ca;
-
- if (p.ptr.cached)
- return 0;
-
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
-
- if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
- durability = max_t(unsigned, durability, ca->mi.durability);
-
- for (i = 0; i < p.ec_nr; i++) {
- struct stripe *s =
- genradix_ptr(&c->stripes[0], p.idx);
-
- if (WARN_ON(!s))
- continue;
-
- durability = max_t(unsigned, durability, s->nr_redundant);
- }
-
- return durability;
-}
-
-unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned durability = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, p);
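+/* Maximum extent size, in sectors, that each packed crc entry type can represent: */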
+static unsigned bch2_crc_field_size_max[] = {
+ [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
+};
- return durability;
-}
+static void bch2_extent_crc_pack(union bch_extent_crc *,
+ struct bch_extent_crc_unpacked,
+ enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
unsigned dev)
return bch2_rand_range(l1 + l2) > l1;
}
- if (force_reconstruct_read(c))
+ if (bch2_force_reconstruct_read)
return p1.idx > p2.idx;
return p1.idx < p2.idx;
!bch2_dev_is_readable(ca))
p.idx++;
- if (force_reconstruct_read(c) &&
- !p.idx && p.ec_nr)
+ if (bch2_force_reconstruct_read &&
+ !p.idx && p.has_ec)
p.idx++;
- if (p.idx >= p.ec_nr + 1)
+ if (p.idx >= (unsigned) p.has_ec + 1)
continue;
if (ret > 0 && !ptr_better(c, p, *pick))
return ret;
}
-void bch2_bkey_append_ptr(struct bkey_i *k,
- struct bch_extent_ptr ptr)
-{
- EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+/* KEY_TYPE_btree_ptr: */
- switch (k->k.type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent:
- EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX)
+ return "value too big";
- ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ return bch2_bkey_ptrs_invalid(c, k);
+}
- memcpy((void *) &k->v + bkey_val_bytes(&k->k),
- &ptr,
- sizeof(ptr));
- k->u64s++;
- break;
- default:
- BUG();
- }
+void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
}
-void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+const char *bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bch_extent_ptr *ptr;
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
-}
+ if (bkey_val_bytes(k.k) <= sizeof(*bp.v))
+ return "value too small";
+
+ if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
+ return "value too big";
-/* extent specific utility code */
+ if (c->sb.version < bcachefs_metadata_version_snapshot &&
+ bp.v->min_key.snapshot)
+ return "invalid min_key.snapshot";
-const struct bch_extent_ptr *
-bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
+ return bch2_bkey_ptrs_invalid(c, k);
+}
+
+void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- const struct bch_extent_ptr *ptr;
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- extent_for_each_ptr(e, ptr)
- if (ptr->dev == dev)
- return ptr;
+ pr_buf(out, "seq %llx written %u min_key ",
+ le64_to_cpu(bp.v->seq),
+ le16_to_cpu(bp.v->sectors_written));
- return NULL;
+ bch2_bpos_to_text(out, bp.v->min_key);
+ pr_buf(out, " ");
+ bch2_bkey_ptrs_to_text(out, c, k);
}
-const struct bch_extent_ptr *
-bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
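+/*
+ * Pre-inode_btree_change extent btree nodes used an exclusive min_key;
+ * translate to/from the current inclusive convention when reading or writing
+ * old-format nodes:
+ */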
+void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
+ unsigned big_endian, int write,
+ struct bkey_s k)
{
- const struct bch_extent_ptr *ptr;
+ struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
- if (ca->mi.group &&
- ca->mi.group - 1 == group)
- return ptr;
- }
-
- return NULL;
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_node_type_is_extents(btree_id) &&
+ bkey_cmp(bp.v->min_key, POS_MIN))
+ bp.v->min_key = write
+ ? bpos_nosnap_predecessor(bp.v->min_key)
+ : bpos_nosnap_successor(bp.v->min_key);
}
-const struct bch_extent_ptr *
-bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
-{
- const struct bch_extent_ptr *ptr;
+/* KEY_TYPE_extent: */
- extent_for_each_ptr(e, ptr)
- if (bch2_dev_in_target(c, ptr->dev, target) &&
- (!ptr->cached ||
- !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
- return ptr;
+const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ return bch2_bkey_ptrs_invalid(c, k);
+}
- return NULL;
+void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
}
-unsigned bch2_extent_is_compressed(struct bkey_s_c k)
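+/*
+ * Extents can merge only if every entry pair matches; the first pass below
+ * checks this, the second merges the checksum entries in place:
+ */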
+enum merge_result bch2_extent_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
{
- unsigned ret = 0;
+ struct bkey_s_extent l = bkey_s_to_extent(_l);
+ struct bkey_s_extent r = bkey_s_to_extent(_r);
+ union bch_extent_entry *en_l = l.v->start;
+ union bch_extent_entry *en_r = r.v->start;
+ struct bch_extent_crc_unpacked crc_l, crc_r;
- switch (k.k->type) {
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
+ if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
+ return BCH_MERGE_NOMERGE;
+
+ crc_l = bch2_extent_crc_unpack(l.k, NULL);
+
+ extent_for_each_entry(l, en_l) {
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+
+ if (extent_entry_type(en_l) != extent_entry_type(en_r))
+ return BCH_MERGE_NOMERGE;
+
+ switch (extent_entry_type(en_l)) {
+ case BCH_EXTENT_ENTRY_ptr: {
+ const struct bch_extent_ptr *lp = &en_l->ptr;
+ const struct bch_extent_ptr *rp = &en_r->ptr;
+ struct bch_dev *ca;
+
+ if (lp->offset + crc_l.compressed_size != rp->offset ||
+ lp->dev != rp->dev ||
+ lp->gen != rp->gen)
+ return BCH_MERGE_NOMERGE;
+
+ /* We don't allow extents to straddle buckets: */
+ ca = bch_dev_bkey_exists(c, lp->dev);
+
+ if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ }
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
+ en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
+ return BCH_MERGE_NOMERGE;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ if (crc_l.csum_type != crc_r.csum_type ||
+ crc_l.compression_type != crc_r.compression_type ||
+ crc_l.nonce != crc_r.nonce)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
+ crc_r.offset)
+ return BCH_MERGE_NOMERGE;
+
+ if (!bch2_checksum_mergeable(crc_l.csum_type))
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_is_compressed(crc_l))
+ return BCH_MERGE_NOMERGE;
- extent_for_each_ptr_decode(e, p, entry)
- if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_NONE)
- ret += p.crc.compressed_size;
+ if (crc_l.csum_type &&
+ crc_l.uncompressed_size +
+ crc_r.uncompressed_size > c->sb.encoded_extent_max)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.uncompressed_size + crc_r.uncompressed_size >
+ bch2_crc_field_size_max[extent_entry_type(en_l)])
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ default:
+ return BCH_MERGE_NOMERGE;
+ }
}
+
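+ /* All entry pairs are mergeable; combine checksums and sizes in place: */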
+ extent_for_each_entry(l, en_l) {
+ struct bch_extent_crc_unpacked crc_l, crc_r;
+
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
+
+ if (!extent_entry_is_crc(en_l))
+ continue;
+
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
+ crc_l.csum,
+ crc_r.csum,
+ crc_r.uncompressed_size << 9);
+
+ crc_l.uncompressed_size += crc_r.uncompressed_size;
+ crc_l.compressed_size += crc_r.compressed_size;
+
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
+ extent_entry_type(en_l));
}
- return ret;
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+
+ return BCH_MERGE_MERGE;
}
-bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
- struct bch_extent_ptr m, u64 offset)
+/* KEY_TYPE_reservation: */
+
+const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- extent_for_each_ptr_decode(e, p, entry)
- if (p.ptr.dev == m.dev &&
- p.ptr.gen == m.gen &&
- (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
- (s64) m.offset - offset)
- return true;
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
+ return "incorrect value size";
- return false;
+ if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
+ return "invalid nr_replicas";
+
+ return NULL;
}
-static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
- union bch_extent_entry *entry)
+void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- union bch_extent_entry *i = ptrs.start;
-
- if (i == entry)
- return NULL;
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- while (extent_entry_next(i) != entry)
- i = extent_entry_next(i);
- return i;
+ pr_buf(out, "generation %u replicas %u",
+ le32_to_cpu(r.v->generation),
+ r.v->nr_replicas);
}
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+enum merge_result bch2_reservation_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *dst, *src, *prev;
- bool drop_crc = true;
-
- EBUG_ON(ptr < &ptrs.start->ptr ||
- ptr >= &ptrs.end->ptr);
- EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+ struct bkey_s_reservation l = bkey_s_to_reservation(_l);
+ struct bkey_s_reservation r = bkey_s_to_reservation(_r);
- src = extent_entry_next(to_entry(ptr));
- if (src != ptrs.end &&
- !extent_entry_is_crc(src))
- drop_crc = false;
+ if (l.v->generation != r.v->generation ||
+ l.v->nr_replicas != r.v->nr_replicas)
+ return BCH_MERGE_NOMERGE;
- dst = to_entry(ptr);
- while ((prev = extent_entry_prev(ptrs, dst))) {
- if (extent_entry_is_ptr(prev))
- break;
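+ /* Reservations have no pointers, so merging is limited only by KEY_SIZE_MAX: */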
+ if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
+ bch2_key_resize(l.k, KEY_SIZE_MAX);
+ bch2_cut_front_s(l.k->p, r.s);
+ return BCH_MERGE_PARTIAL;
+ }
- if (extent_entry_is_crc(prev)) {
- if (drop_crc)
- dst = prev;
- break;
- }
+ bch2_key_resize(l.k, l.k->size + r.k->size);
- dst = prev;
- }
+ return BCH_MERGE_MERGE;
+}
- memmove_u64s_down(dst, src,
- (u64 *) ptrs.end - (u64 *) src);
- k.k->u64s -= (u64 *) src - (u64 *) dst;
+/* Extent checksum entries: */
- return dst;
+/* returns true if not equal */
+static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
+ struct bch_extent_crc_unpacked r)
+{
+ return (l.csum_type != r.csum_type ||
+ l.compression_type != r.compression_type ||
+ l.compressed_size != r.compressed_size ||
+ l.uncompressed_size != r.uncompressed_size ||
+ l.offset != r.offset ||
+ l.live_size != r.live_size ||
+ l.nonce != r.nonce ||
+ bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
struct bch_extent_crc_unpacked n)
{
- return !u.compression_type &&
+ return !crc_is_compressed(u) &&
u.csum_type &&
u.uncompressed_size > u.live_size &&
bch2_csum_type_is_encryption(u.csum_type) ==
bch2_csum_type_is_encryption(n.csum_type);
}
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent e,
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
struct bch_extent_crc_unpacked n)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *i;
if (!n.csum_type)
return false;
- extent_for_each_crc(e, crc, i)
+ bkey_for_each_crc(k.k, ptrs, crc, i)
if (can_narrow_crc(crc, n))
return true;
* currently live (so that readers won't have to bounce) while we've got the
* checksum we need:
*/
-bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
- struct bch_extent_crc_unpacked n)
+bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
struct bch_extent_crc_unpacked u;
struct extent_ptr_decoded p;
union bch_extent_entry *i;
/* Find a checksum entry that covers only live data: */
if (!n.csum_type) {
- extent_for_each_crc(extent_i_to_s(e), u, i)
- if (!u.compression_type &&
+ bkey_for_each_crc(&k->k, ptrs, u, i)
+ if (!crc_is_compressed(u) &&
u.csum_type &&
u.live_size == u.uncompressed_size) {
n = u;
return false;
}
found:
- BUG_ON(n.compression_type);
+ BUG_ON(crc_is_compressed(n));
BUG_ON(n.offset);
- BUG_ON(n.live_size != e->k.size);
+ BUG_ON(n.live_size != k->k.size);
restart_narrow_pointers:
- extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+
+ bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
if (can_narrow_crc(p.crc, n)) {
- bch2_bkey_drop_ptr(extent_i_to_s(e).s, &i->ptr);
+ bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
p.ptr.offset += p.crc.offset;
p.crc = n;
- bch2_extent_ptr_decoded_append(e, &p);
+ bch2_extent_ptr_decoded_append(k, &p);
ret = true;
goto restart_narrow_pointers;
}
return ret;
}
-/* returns true if not equal */
-static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
- struct bch_extent_crc_unpacked r)
+static void bch2_extent_crc_pack(union bch_extent_crc *dst,
+ struct bch_extent_crc_unpacked src,
+ enum bch_extent_entry_type type)
{
- return (l.csum_type != r.csum_type ||
- l.compression_type != r.compression_type ||
- l.compressed_size != r.compressed_size ||
- l.uncompressed_size != r.uncompressed_size ||
- l.offset != r.offset ||
- l.live_size != r.live_size ||
- l.nonce != r.nonce ||
- bch2_crc_cmp(l.csum, r.csum));
+#define set_common_fields(_dst, _src) \
+ _dst.type = 1 << type; \
+ _dst.csum_type = _src.csum_type, \
+ _dst.compression_type = _src.compression_type, \
+ _dst._compressed_size = _src.compressed_size - 1, \
+ _dst._uncompressed_size = _src.uncompressed_size - 1, \
+ _dst.offset = _src.offset
+
+ switch (type) {
+ case BCH_EXTENT_ENTRY_crc32:
+ set_common_fields(dst->crc32, src);
+ dst->crc32.csum = *((__le32 *) &src.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ set_common_fields(dst->crc64, src);
+ dst->crc64.nonce = src.nonce;
+ dst->crc64.csum_lo = src.csum.lo;
+ dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ set_common_fields(dst->crc128, src);
+ dst->crc128.nonce = src.nonce;
+ dst->crc128.csum = src.csum;
+ break;
+ default:
+ BUG();
+ }
+#undef set_common_fields
}
-void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
+void bch2_extent_crc_append(struct bkey_i *k,
+ struct bch_extent_crc_unpacked new)
{
- union bch_extent_entry *entry;
- u64 *d = (u64 *) bkeyp_val(f, k);
- unsigned i;
-
- for (i = 0; i < bkeyp_val_u64s(f, k); i++)
- d[i] = swab64(d[i]);
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ union bch_extent_crc *crc = (void *) ptrs.end;
+ enum bch_extent_entry_type type;
- for (entry = (union bch_extent_entry *) d;
- entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
- entry = extent_entry_next(entry)) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.csum = swab32(entry->crc32.csum);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
- entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.csum.hi = (__force __le64)
- swab64((__force u64) entry->crc128.csum.hi);
- entry->crc128.csum.lo = (__force __le64)
- swab64((__force u64) entry->crc128.csum.lo);
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
- }
- }
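+ /* Use the smallest crc entry type that fits the checksum, size and nonce: */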
+ if (bch_crc_bytes[new.csum_type] <= 4 &&
+ new.uncompressed_size <= CRC32_SIZE_MAX &&
+ new.nonce <= CRC32_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc32;
+ else if (bch_crc_bytes[new.csum_type] <= 10 &&
+ new.uncompressed_size <= CRC64_SIZE_MAX &&
+ new.nonce <= CRC64_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc64;
+ else if (bch_crc_bytes[new.csum_type] <= 16 &&
+ new.uncompressed_size <= CRC128_SIZE_MAX &&
+ new.nonce <= CRC128_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc128;
+ else
+ BUG();
+
+ bch2_extent_crc_pack(crc, new, type);
+
+ k->k.u64s += extent_entry_u64s(ptrs.end);
+
+ EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
-static const char *extent_ptr_invalid(const struct bch_fs *c,
- struct bkey_s_c k,
- const struct bch_extent_ptr *ptr,
- unsigned size_ondisk,
- bool metadata)
+/* Generic code for keys with pointers: */
+
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr2;
- struct bch_dev *ca;
+ return bch2_bkey_devs(k).nr;
+}
- if (ptr->dev >= c->sb.nr_devices ||
- !c->devs[ptr->dev])
- return "pointer to invalid device";
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
+{
+ return k.k->type == KEY_TYPE_reservation
+ ? bkey_s_c_to_reservation(k).v->nr_replicas
+ : bch2_bkey_dirty_devs(k).nr;
+}
- ca = bch_dev_bkey_exists(c, ptr->dev);
- if (!ca)
- return "pointer to invalid device";
+unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
+{
+ unsigned ret = 0;
- bkey_for_each_ptr(ptrs, ptr2)
- if (ptr != ptr2 && ptr->dev == ptr2->dev)
- return "multiple pointers to same device";
+ if (k.k->type == KEY_TYPE_reservation) {
+ ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ } else {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
- return "offset past end of device";
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ ret += !p.ptr.cached && !crc_is_compressed(p.crc);
+ }
- if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
- return "offset before first bucket";
+ return ret;
+}
- if (bucket_remainder(ca, ptr->offset) +
- size_ondisk > ca->mi.bucket_size)
- return "spans multiple buckets";
+unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned ret = 0;
- return NULL;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (!p.ptr.cached && crc_is_compressed(p.crc))
+ ret += p.crc.compressed_size;
+
+ return ret;
}
-static void bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
+bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
- const struct bch_extent_ptr *ptr;
- const struct bch_extent_stripe_ptr *ec;
- struct bch_dev *ca;
- bool first = true;
- bkey_extent_entry_for_each(ptrs, entry) {
- if (!first)
- pr_buf(out, " ");
+ bkey_for_each_crc(k.k, ptrs, crc, entry)
+ if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
+ return true;
+ return false;
+}
- switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- ptr = entry_to_ptr(entry);
- ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
- ? bch_dev_bkey_exists(c, ptr->dev)
- : NULL;
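+/*
+ * Returns true if every extent in [pos, pos + size) has at least nr_replicas
+ * replicas and, when @compressed is false, no compressed pointers:
+ */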
+bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
+ unsigned nr_replicas, bool compressed)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bpos end = pos;
+ struct bkey_s_c k;
+ bool ret = true;
+ int err;
- pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
- (u64) ptr->offset, ptr->gen,
- ptr->cached ? " cached" : "",
- ca && ptr_stale(ca, ptr)
- ? " stale" : "");
- break;
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+ end.offset += size;
- pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
- crc.compressed_size,
- crc.uncompressed_size,
- crc.offset, crc.nonce,
- crc.csum_type,
- crc.compression_type);
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, pos,
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- ec = &entry->stripe_ptr;
- pr_buf(out, "ec: idx %llu block %u",
- (u64) ec->idx, ec->block);
+ if (nr_replicas > bch2_bkey_replicas(c, k) ||
+ (!compressed && bch2_bkey_sectors_compressed(k))) {
+ ret = false;
break;
- default:
- pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
- return;
}
-
- first = false;
}
-}
+ bch2_trans_iter_put(&trans, iter);
-/* Btree ptrs */
+ bch2_trans_exit(&trans);
-const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
+ return ret;
+}
+
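+/* Cached pointers don't count as replicas; erasure coded pointers add their redundancy: */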
+unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
- const struct bch_extent_ptr *ptr;
- const char *reason;
+ struct extent_ptr_decoded p = { 0 };
+ unsigned replicas = 0;
- if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
- return "value too big";
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if (p.ptr.cached)
+ continue;
- bkey_extent_entry_for_each(ptrs, entry) {
- if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
- return "invalid extent entry type";
+ if (p.has_ec)
+ replicas += p.ec.redundancy;
- if (!extent_entry_is_ptr(entry))
- return "has non ptr field";
- }
+ replicas++;
- bkey_for_each_ptr(ptrs, ptr) {
- reason = extent_ptr_invalid(c, k, ptr,
- c->opts.btree_node_size,
- true);
- if (reason)
- return reason;
}
- return NULL;
+ return replicas;
}
-void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
+static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ struct extent_ptr_decoded p)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
- const char *err;
- char buf[160];
- struct bucket_mark mark;
+ unsigned durability = 0;
struct bch_dev *ca;
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked(c, k, false), c,
- "btree key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
- if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
- return;
-
- bkey_for_each_ptr(ptrs, ptr) {
- ca = bch_dev_bkey_exists(c, ptr->dev);
+ if (p.ptr.cached)
+ return 0;
- mark = ptr_bucket_mark(ca, ptr);
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
- err = "stale";
- if (gen_after(mark.gen, ptr->gen))
- goto err;
+ if (ca->mi.state != BCH_MEMBER_STATE_failed)
+ durability = max_t(unsigned, durability, ca->mi.durability);
- err = "inconsistent";
- if (mark.data_type != BCH_DATA_BTREE ||
- mark.dirty_sectors < c->opts.btree_node_size)
- goto err;
- }
+ if (p.has_ec)
+ durability += p.ec.redundancy;
- return;
-err:
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
- err, buf, PTR_BUCKET_NR(ca, ptr),
- mark.gen, (unsigned) mark.v.counter);
+ return durability;
}
-void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
+unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
- const char *invalid;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned durability = 0;
- bkey_ptrs_to_text(out, c, k);
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ durability += bch2_extent_ptr_durability(c, p);
- invalid = bch2_btree_ptr_invalid(c, k);
- if (invalid)
- pr_buf(out, " invalid: %s", invalid);
+ return durability;
}
-/* Extents */
-
-bool __bch2_cut_front(struct bpos where, struct bkey_s k)
+void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
+ unsigned target,
+ unsigned nr_desired_replicas)
{
- u64 len = 0;
-
- if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
- return false;
-
- EBUG_ON(bkey_cmp(where, k.k->p) > 0);
-
- len = k.k->p.offset - where.offset;
-
- BUG_ON(len > k.k->size);
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
- /*
- * Don't readjust offset if the key size is now 0, because that could
- * cause offset to point to the next bucket:
- */
- if (!len)
- k.k->type = KEY_TYPE_deleted;
- else if (bkey_extent_is_data(k.k)) {
- struct bkey_s_extent e = bkey_s_to_extent(k);
- union bch_extent_entry *entry;
- bool seen_crc = false;
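+ /*
+ * Two passes: first prefer marking pointers outside @target cached, then,
+ * if we're still above the desired durability, any pointer:
+ */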
+ if (target && extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
- extent_for_each_entry(e, entry) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- if (!seen_crc)
- entry->ptr.offset += e.k->size - len;
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.offset += e.k->size - len;
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.offset += e.k->size - len;
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.offset += e.k->size - len;
- break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
+ if (n && n <= extra &&
+ !bch2_dev_in_target(c, p.ptr.dev, target)) {
+ entry->ptr.cached = true;
+ extra -= n;
}
-
- if (extent_entry_is_crc(entry))
- seen_crc = true;
}
- }
- k.k->size = len;
+ if (extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
- return true;
+ if (n && n <= extra) {
+ entry->ptr.cached = true;
+ extra -= n;
+ }
+ }
}
-bool bch2_cut_back(struct bpos where, struct bkey *k)
+void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
- u64 len = 0;
-
- if (bkey_cmp(where, k->p) >= 0)
- return false;
-
- EBUG_ON(bkey_cmp(where, bkey_start_pos(k)) < 0);
-
- len = where.offset - bkey_start_offset(k);
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+ union bch_extent_entry *next = extent_entry_next(entry);
- BUG_ON(len > k->size);
-
- k->p = where;
- k->size = len;
-
- if (!len)
- k->type = KEY_TYPE_deleted;
-
- return true;
-}
-
-/**
- * bch_key_resize - adjust size of @k
- *
- * bkey_start_offset(k) will be preserved, modifies where the extent ends
- */
-void bch2_key_resize(struct bkey *k,
- unsigned new_size)
-{
- k->p.offset -= k->size;
- k->p.offset += new_size;
- k->size = new_size;
+ memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
+ k->k.u64s -= extent_entry_u64s(entry);
}
-static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
- struct bkey_i *src)
+void bch2_bkey_append_ptr(struct bkey_i *k,
+ struct bch_extent_ptr ptr)
{
- struct bkey_format *f = &b->format;
- struct bkey_i *dst_unpacked;
- struct bkey_packed tmp;
-
- if ((dst_unpacked = packed_to_bkey(dst)))
- dst_unpacked->k = src->k;
- else if (bch2_bkey_pack_key(&tmp, &src->k, f))
- memcpy_u64s(dst, &tmp, f->key_u64s);
- else
- return false;
-
- memcpy_u64s(bkeyp_val(f, dst), &src->v, bkey_val_u64s(&src->k));
- return true;
-}
-
-struct extent_insert_state {
- struct btree_insert *trans;
- struct btree_insert_entry *insert;
- struct bpos committed;
+ EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
- /* for deleting: */
- struct bkey_i whiteout;
- bool update_journal;
- bool update_btree;
- bool deleting;
-};
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ case KEY_TYPE_extent:
+ EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
-static bool bch2_extent_merge_inline(struct bch_fs *,
- struct btree_iter *,
- struct bkey_packed *,
- struct bkey_packed *,
- bool);
+ ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
-static void verify_extent_nonoverlapping(struct btree *b,
- struct btree_node_iter *_iter,
- struct bkey_i *insert)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct btree_node_iter iter;
- struct bkey_packed *k;
- struct bkey uk;
-
- iter = *_iter;
- k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
- BUG_ON(k &&
- (uk = bkey_unpack_key(b, k),
- bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
-
- iter = *_iter;
- k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_discard);
-#if 0
- BUG_ON(k &&
- (uk = bkey_unpack_key(b, k),
- bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
-#else
- if (k &&
- (uk = bkey_unpack_key(b, k),
- bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
- char buf1[100];
- char buf2[100];
-
- bch2_bkey_to_text(&PBUF(buf1), &insert->k);
- bch2_bkey_to_text(&PBUF(buf2), &uk);
-
- bch2_dump_btree_node(b);
- panic("insert > next :\n"
- "insert %s\n"
- "next %s\n",
- buf1, buf2);
+ memcpy((void *) &k->v + bkey_val_bytes(&k->k),
+ &ptr,
+ sizeof(ptr));
+ k->u64s++;
+ break;
+ default:
+ BUG();
}
-#endif
-
-#endif
}
-static void verify_modified_extent(struct btree_iter *iter,
- struct bkey_packed *k)
+static inline void __extent_entry_insert(struct bkey_i *k,
+ union bch_extent_entry *dst,
+ union bch_extent_entry *new)
{
- bch2_btree_iter_verify(iter, iter->l[0].b);
- bch2_verify_insert_pos(iter->l[0].b, k, k, k->u64s);
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+
+ memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
+ dst, (u64 *) end - (u64 *) dst);
+ k->k.u64s += extent_entry_u64s(new);
+ memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
-static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
- struct bkey_i *insert)
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
+ struct extent_ptr_decoded *p)
{
- struct btree_iter_level *l = &iter->l[0];
- struct btree_node_iter node_iter;
- struct bkey_packed *k;
-
- BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
-
- EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
- verify_extent_nonoverlapping(l->b, &l->iter, insert);
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(&k->k, NULL);
+ union bch_extent_entry *pos;
- node_iter = l->iter;
- k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
- if (k && !bkey_written(l->b, k) &&
- bch2_extent_merge_inline(c, iter, k, bkey_to_packed(insert), true))
- return;
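+ /* Find (or append) a crc entry matching p->crc, then insert the pointer after it: */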
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = ptrs.start;
+ goto found;
+ }
- node_iter = l->iter;
- k = bch2_btree_node_iter_peek_filter(&node_iter, l->b, KEY_TYPE_discard);
- if (k && !bkey_written(l->b, k) &&
- bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), k, false))
- return;
+ bkey_for_each_crc(&k->k, ptrs, crc, pos)
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = extent_entry_next(pos);
+ goto found;
+ }
- k = bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
+ bch2_extent_crc_append(k, p->crc);
+ pos = bkey_val_end(bkey_i_to_s(k));
+found:
+ p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ptr));
- bch2_bset_insert(l->b, &l->iter, k, insert, 0);
- bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
- bch2_btree_iter_verify(iter, l->b);
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
+ }
}
-static void extent_insert_committed(struct extent_insert_state *s)
+static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
+ union bch_extent_entry *entry)
{
- struct bch_fs *c = s->trans->c;
- struct btree_iter *iter = s->insert->iter;
- struct bkey_i *insert = s->insert->k;
- BKEY_PADDED(k) split;
-
- EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
- EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
-
- bkey_copy(&split.k, insert);
- if (s->deleting)
- split.k.k.type = KEY_TYPE_discard;
-
- bch2_cut_back(s->committed, &split.k.k);
-
- if (!bkey_cmp(s->committed, iter->pos))
- return;
+ union bch_extent_entry *i = ptrs.start;
- bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
+ if (i == entry)
+ return NULL;
- if (s->update_btree) {
- if (debug_check_bkeys(c))
- bch2_bkey_debugcheck(c, iter->l[0].b,
- bkey_i_to_s_c(&split.k));
+ while (extent_entry_next(i) != entry)
+ i = extent_entry_next(i);
+ return i;
+}
- EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *dst, *src, *prev;
+ bool drop_crc = true;
- extent_bset_insert(c, iter, &split.k);
- }
+ EBUG_ON(ptr < &ptrs.start->ptr ||
+ ptr >= &ptrs.end->ptr);
+ EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
- if (s->update_journal) {
- bkey_copy(&split.k, !s->deleting ? insert : &s->whiteout);
- if (s->deleting)
- split.k.k.type = KEY_TYPE_discard;
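+ /*
+ * If the entry after the pointer being dropped is another pointer, the
+ * preceding crc entry still covers it and must be kept:
+ */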
+ src = extent_entry_next(to_entry(ptr));
+ if (src != ptrs.end &&
+ !extent_entry_is_crc(src))
+ drop_crc = false;
- bch2_cut_back(s->committed, &split.k.k);
+ dst = to_entry(ptr);
+ while ((prev = extent_entry_prev(ptrs, dst))) {
+ if (extent_entry_is_ptr(prev))
+ break;
- EBUG_ON(bkey_deleted(&split.k.k) || !split.k.k.size);
+ if (extent_entry_is_crc(prev)) {
+ if (drop_crc)
+ dst = prev;
+ break;
+ }
- bch2_btree_journal_key(s->trans, iter, &split.k);
+ dst = prev;
}
- bch2_cut_front(s->committed, insert);
+ memmove_u64s_down(dst, src,
+ (u64 *) ptrs.end - (u64 *) src);
+ k.k->u64s -= (u64 *) src - (u64 *) dst;
- insert->k.needs_whiteout = false;
+ return dst;
}
-void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
- struct btree *b = iter->l[0].b;
-
- BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
-
- bch2_cut_back(b->key.k.p, &k->k);
+ struct bch_extent_ptr *ptr;
- BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+ bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}
-enum btree_insert_ret
-bch2_extent_can_insert(struct btree_insert *trans,
- struct btree_insert_entry *insert,
- unsigned *u64s)
+const struct bch_extent_ptr *
+bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
- struct btree_iter_level *l = &insert->iter->l[0];
- struct btree_node_iter node_iter = l->iter;
- enum bch_extent_overlap overlap;
- struct bkey_packed *_k;
- struct bkey unpacked;
- struct bkey_s_c k;
- int sectors;
-
- BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
- !bch2_extent_is_atomic(&insert->k->k, insert->iter));
-
- /*
- * We avoid creating whiteouts whenever possible when deleting, but
- * those optimizations mean we may potentially insert two whiteouts
- * instead of one (when we overlap with the front of one extent and the
- * back of another):
- */
- if (bkey_whiteout(&insert->k->k))
- *u64s += BKEY_U64s;
-
- _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
- KEY_TYPE_discard);
- if (!_k)
- return BTREE_INSERT_OK;
-
- k = bkey_disassemble(l->b, _k, &unpacked);
-
- overlap = bch2_extent_overlap(&insert->k->k, k.k);
-
- /* account for having to split existing extent: */
- if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
- *u64s += _k->u64s;
-
- if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
- (sectors = bch2_extent_is_compressed(k))) {
- int flags = trans->flags & BTREE_INSERT_NOFAIL
- ? BCH_DISK_RESERVATION_NOFAIL : 0;
-
- switch (bch2_disk_reservation_add(trans->c,
- trans->disk_res,
- sectors, flags)) {
- case 0:
- break;
- case -ENOSPC:
- return BTREE_INSERT_ENOSPC;
- default:
- BUG();
- }
- }
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- return BTREE_INSERT_OK;
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == dev)
+ return ptr;
+
+ return NULL;
}
-static void
-extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
- struct bkey_packed *_k, struct bkey_s k,
- enum bch_extent_overlap overlap)
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
- struct bch_fs *c = s->trans->c;
- struct btree_iter *iter = s->insert->iter;
- struct btree_iter_level *l = &iter->l[0];
-
- switch (overlap) {
- case BCH_EXTENT_OVERLAP_FRONT:
- /* insert overlaps with start of k: */
- __bch2_cut_front(insert->k.p, k);
- BUG_ON(bkey_deleted(k.k));
- extent_save(l->b, _k, k.k);
- verify_modified_extent(iter, _k);
- break;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
- case BCH_EXTENT_OVERLAP_BACK:
- /* insert overlaps with end of k: */
- bch2_cut_back(bkey_start_pos(&insert->k), k.k);
- BUG_ON(bkey_deleted(k.k));
- extent_save(l->b, _k, k.k);
+ bkey_for_each_ptr(ptrs, ptr)
+ if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (!ptr->cached ||
+ !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ return true;
- /*
- * As the auxiliary tree is indexed by the end of the
- * key and we've just changed the end, update the
- * auxiliary tree.
- */
- bch2_bset_fix_invalidated_key(l->b, _k);
- bch2_btree_node_iter_fix(iter, l->b, &l->iter,
- _k, _k->u64s, _k->u64s);
- verify_modified_extent(iter, _k);
- break;
+ return false;
+}
- case BCH_EXTENT_OVERLAP_ALL: {
- /* The insert key completely covers k, invalidate k */
- if (!bkey_whiteout(k.k))
- btree_account_key_drop(l->b, _k);
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_extent_ptr m, u64 offset)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- k.k->size = 0;
- k.k->type = KEY_TYPE_deleted;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == m.dev &&
+ p.ptr.gen == m.gen &&
+ (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
+ (s64) m.offset - offset)
+ return true;
- if (_k >= btree_bset_last(l->b)->start) {
- unsigned u64s = _k->u64s;
-
- bch2_bset_delete(l->b, _k, _k->u64s);
- bch2_btree_node_iter_fix(iter, l->b, &l->iter,
- _k, u64s, 0);
- bch2_btree_iter_verify(iter, l->b);
- } else {
- extent_save(l->b, _k, k.k);
- bch2_btree_node_iter_fix(iter, l->b, &l->iter,
- _k, _k->u64s, _k->u64s);
- verify_modified_extent(iter, _k);
- }
+ return false;
+}
- break;
- }
- case BCH_EXTENT_OVERLAP_MIDDLE: {
- BKEY_PADDED(k) split;
- /*
- * The insert key falls 'in the middle' of k
- * The insert key splits k in 3:
- * - start only in k, preserve
- * - middle common section, invalidate in k
- * - end only in k, preserve
- *
- * We update the old key to preserve the start,
- * insert will be the new common section,
- * we manually insert the end that we are preserving.
- *
- * modify k _before_ doing the insert (which will move
- * what k points to)
- */
- bkey_reassemble(&split.k, k.s_c);
- split.k.k.needs_whiteout |= bkey_written(l->b, _k);
+/*
+ * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
+ *
+ * Returns true if @k should be dropped entirely
+ *
+ * For existing keys, only called when btree nodes are being rewritten, not when
+ * they're merely being compacted/resorted in memory.
+ */
+bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
+{
+ struct bch_extent_ptr *ptr;
- bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
- BUG_ON(bkey_deleted(&split.k.k));
+ bch2_bkey_drop_ptrs(k, ptr,
+ ptr->cached &&
+ ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
- __bch2_cut_front(insert->k.p, k);
- BUG_ON(bkey_deleted(k.k));
- extent_save(l->b, _k, k.k);
- verify_modified_extent(iter, _k);
+ /* will only happen if all pointers were cached: */
+ if (!bch2_bkey_nr_ptrs(k.s_c))
+ k.k->type = KEY_TYPE_deleted;
- extent_bset_insert(c, iter, &split.k);
- break;
- }
- }
+ return bkey_deleted(k.k);
}
-static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
+void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- struct btree_iter *iter = s->insert->iter;
- struct btree_iter_level *l = &iter->l[0];
- struct bkey_packed *_k;
- struct bkey unpacked;
- struct bkey_i *insert = s->insert->k;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+ const struct bch_extent_ptr *ptr;
+ const struct bch_extent_stripe_ptr *ec;
+ struct bch_dev *ca;
+ bool first = true;
- while (bkey_cmp(s->committed, insert->k.p) < 0 &&
- (_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
- KEY_TYPE_discard))) {
- struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
- enum bch_extent_overlap overlap = bch2_extent_overlap(&insert->k, k.k);
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (!first)
+ pr_buf(out, " ");
- EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
+ switch (__extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ ptr = entry_to_ptr(entry);
+ ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+ ? bch_dev_bkey_exists(c, ptr->dev)
+ : NULL;
- if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
+ pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
+ (u64) ptr->offset, ptr->gen,
+ ptr->cached ? " cached" : "",
+ ca && ptr_stale(ca, ptr)
+ ? " stale" : "");
break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- s->committed = bpos_min(s->insert->k->k.p, k.k->p);
-
- if (!bkey_whiteout(k.k))
- s->update_journal = true;
-
- if (!s->update_journal) {
- bch2_cut_front(s->committed, insert);
- bch2_cut_front(s->committed, &s->whiteout);
- bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
- goto next;
- }
+ pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
+ crc.compressed_size,
+ crc.uncompressed_size,
+ crc.offset, crc.nonce,
+ crc.csum_type,
+ crc.compression_type);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ ec = &entry->stripe_ptr;
- /*
- * When deleting, if possible just do it by switching the type
- * of the key we're deleting, instead of creating and inserting
- * a new whiteout:
- */
- if (s->deleting &&
- !s->update_btree &&
- !bkey_cmp(insert->k.p, k.k->p) &&
- !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
- if (!bkey_whiteout(k.k)) {
- btree_account_key_drop(l->b, _k);
- _k->type = KEY_TYPE_discard;
- reserve_whiteout(l->b, _k);
- }
+ pr_buf(out, "ec: idx %llu block %u",
+ (u64) ec->idx, ec->block);
break;
+ default:
+ pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
+ return;
}
- if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
- insert->k.needs_whiteout = true;
- s->update_btree = true;
- }
+ first = false;
+ }
+}
- if (s->update_btree &&
- overlap == BCH_EXTENT_OVERLAP_ALL &&
- bkey_whiteout(k.k) &&
- k.k->needs_whiteout) {
- unreserve_whiteout(l->b, _k);
- _k->needs_whiteout = false;
- }
+static const char *extent_ptr_invalid(const struct bch_fs *c,
+ struct bkey_s_c k,
+ const struct bch_extent_ptr *ptr,
+ unsigned size_ondisk,
+ bool metadata)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr2;
+ struct bch_dev *ca;
- extent_squash(s, insert, _k, k, overlap);
+ if (!bch2_dev_exists2(c, ptr->dev))
+ return "pointer to invalid device";
- if (!s->update_btree)
- bch2_cut_front(s->committed, insert);
-next:
- if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
- overlap == BCH_EXTENT_OVERLAP_MIDDLE)
- break;
- }
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+ if (!ca)
+ return "pointer to invalid device";
- if (bkey_cmp(s->committed, insert->k.p) < 0)
- s->committed = bpos_min(s->insert->k->k.p, l->b->key.k.p);
+ bkey_for_each_ptr(ptrs, ptr2)
+ if (ptr != ptr2 && ptr->dev == ptr2->dev)
+ return "multiple pointers to same device";
- /*
- * may have skipped past some deleted extents greater than the insert
- * key, before we got to a non deleted extent and knew we could bail out
- * rewind the iterator a bit if necessary:
- */
- {
- struct btree_node_iter node_iter = l->iter;
+ if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
+ return "offset past end of device";
- while ((_k = bch2_btree_node_iter_prev_all(&node_iter, l->b)) &&
- bkey_cmp_left_packed(l->b, _k, &s->committed) > 0)
- l->iter = node_iter;
- }
-}
+ if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
+ return "offset before first bucket";
-/**
- * bch_extent_insert_fixup - insert a new extent and deal with overlaps
- *
- * this may result in not actually doing the insert, or inserting some subset
- * of the insert key. For cmpxchg operations this is where that logic lives.
- *
- * All subsets of @insert that need to be inserted are inserted using
- * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
- * returns false, setting @iter->pos for the prefix of @insert that actually got
- * inserted.
- *
- * BSET INVARIANTS: this function is responsible for maintaining all the
- * invariants for bsets of extents in memory. things get really hairy with 0
- * size extents
- *
- * within one bset:
- *
- * bkey_start_pos(bkey_next(k)) >= k
- * or bkey_start_offset(bkey_next(k)) >= k->offset
- *
- * i.e. strict ordering, no overlapping extents.
- *
- * multiple bsets (i.e. full btree node):
- *
- * ∀ k, j
- * k.size != 0 ∧ j.size != 0 →
- * ¬ (k > bkey_start_pos(j) ∧ k < j)
- *
- * i.e. no two overlapping keys _of nonzero size_
- *
- * We can't realistically maintain this invariant for zero size keys because of
- * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
- * there may be another 0 size key between them in another bset, and it will
- * thus overlap with the merged key.
- *
- * In addition, the end of iter->pos indicates how much has been processed.
- * If the end of iter->pos is not the same as the end of insert, then
- * key insertion needs to continue/be retried.
- */
-enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *trans,
- struct btree_insert_entry *insert)
-{
- struct btree_iter *iter = insert->iter;
- struct btree *b = iter->l[0].b;
- struct extent_insert_state s = {
- .trans = trans,
- .insert = insert,
- .committed = iter->pos,
-
- .whiteout = *insert->k,
- .update_journal = !bkey_whiteout(&insert->k->k),
- .update_btree = !bkey_whiteout(&insert->k->k),
- .deleting = bkey_whiteout(&insert->k->k),
- };
-
- EBUG_ON(iter->level);
- EBUG_ON(!insert->k->k.size);
-
- /*
- * As we process overlapping extents, we advance @iter->pos both to
- * signal to our caller (btree_insert_key()) how much of @insert->k has
- * been inserted, and also to keep @iter->pos consistent with
- * @insert->k and the node iterator that we're advancing:
- */
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
-
- __bch2_insert_fixup_extent(&s);
-
- extent_insert_committed(&s);
-
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
- EBUG_ON(bkey_cmp(iter->pos, s.committed));
-
- if (insert->k->k.size) {
- /* got to the end of this leaf node */
- BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
- return BTREE_INSERT_NEED_TRAVERSE;
- }
+ if (bucket_remainder(ca, ptr->offset) +
+ size_ondisk > ca->mi.bucket_size)
+ return "spans multiple buckets";
- return BTREE_INSERT_OK;
+ return NULL;
}
-const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
+const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_devs_list devs;
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
- const struct bch_extent_ptr *ptr;
- unsigned size_ondisk = e.k->size;
+ unsigned size_ondisk = k.k->size;
const char *reason;
unsigned nonce = UINT_MAX;
+ unsigned i;
- if (bkey_val_u64s(e.k) > BKEY_EXTENT_VAL_U64s_MAX)
- return "value too big";
+ if (k.k->type == KEY_TYPE_btree_ptr ||
+ k.k->type == KEY_TYPE_btree_ptr_v2)
+ size_ondisk = c->opts.btree_node_size;
- extent_for_each_entry(e, entry) {
+ bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
return "invalid extent entry type";
+ if (k.k->type == KEY_TYPE_btree_ptr &&
+ !extent_entry_is_ptr(entry))
+ return "has non ptr field";
+
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
- ptr = entry_to_ptr(entry);
-
- reason = extent_ptr_invalid(c, e.s_c, &entry->ptr,
+ reason = extent_ptr_invalid(c, k, &entry->ptr,
size_ondisk, false);
if (reason)
return reason;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
- crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- if (crc.offset + e.k->size >
+ if (crc.offset + crc.live_size >
crc.uncompressed_size)
return "checksum offset + key size > uncompressed size";
if (!bch2_checksum_type_valid(c, crc.csum_type))
return "invalid checksum type";
- if (crc.compression_type >= BCH_COMPRESSION_NR)
+ if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
return "invalid compression type";
if (bch2_csum_type_is_encryption(crc.csum_type)) {
}
}
- return NULL;
-}
-
-void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
-{
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- char buf[160];
-
- /*
- * XXX: we should be doing most/all of these checks at startup time,
- * where we check bch2_bkey_invalid() in btree_node_read_done()
- *
- * But note that we can't check for stale pointers or incorrect gc marks
- * until after journal replay is done (it might be an extent that's
- * going to get overwritten during replay)
- */
-
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked(c, e.s_c, false), c,
- "extent key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
-
- /*
- * If journal replay hasn't finished, we might be seeing keys
- * that will be overwritten by the time journal replay is done:
- */
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- return;
-
- extent_for_each_ptr_decode(e, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
- unsigned stale = gen_after(mark.gen, p.ptr.gen);
- unsigned disk_sectors = ptr_disk_sectors(p);
- unsigned mark_sectors = p.ptr.cached
- ? mark.cached_sectors
- : mark.dirty_sectors;
-
- bch2_fs_bug_on(stale && !p.ptr.cached, c,
- "stale dirty pointer (ptr gen %u bucket %u",
- p.ptr.gen, mark.gen);
-
- bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
-
- bch2_fs_bug_on(!stale &&
- (mark.data_type != BCH_DATA_USER ||
- mark_sectors < disk_sectors), c,
- "extent pointer not marked: %s:\n"
- "type %u sectors %u < %u",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
- mark.data_type,
- mark_sectors, disk_sectors);
- }
-}
-
-void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- const char *invalid;
-
- bkey_ptrs_to_text(out, c, k);
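+ /* Sort the device list so duplicate pointers to the same device become adjacent: */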
+ devs = bch2_bkey_devs(k);
+ bubble_sort(devs.devs, devs.nr, u8_cmp);
+ for (i = 0; i + 1 < devs.nr; i++)
+ if (devs.devs[i] == devs.devs[i + 1])
+ return "multiple ptrs to same device";
- invalid = bch2_extent_invalid(c, k);
- if (invalid)
- pr_buf(out, " invalid: %s", invalid);
+ return NULL;
}
-static void bch2_extent_crc_init(union bch_extent_crc *crc,
- struct bch_extent_crc_unpacked new)
+void bch2_ptr_swab(struct bkey_s k)
{
-#define common_fields(_crc) \
- .csum_type = _crc.csum_type, \
- .compression_type = _crc.compression_type, \
- ._compressed_size = _crc.compressed_size - 1, \
- ._uncompressed_size = _crc.uncompressed_size - 1, \
- .offset = _crc.offset
-
- if (bch_crc_bytes[new.csum_type] <= 4 &&
- new.uncompressed_size <= CRC32_SIZE_MAX &&
- new.nonce <= CRC32_NONCE_MAX) {
- crc->crc32 = (struct bch_extent_crc32) {
- .type = 1 << BCH_EXTENT_ENTRY_crc32,
- common_fields(new),
- .csum = *((__le32 *) &new.csum.lo),
- };
- return;
- }
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ u64 *d;
- if (bch_crc_bytes[new.csum_type] <= 10 &&
- new.uncompressed_size <= CRC64_SIZE_MAX &&
- new.nonce <= CRC64_NONCE_MAX) {
- crc->crc64 = (struct bch_extent_crc64) {
- .type = 1 << BCH_EXTENT_ENTRY_crc64,
- common_fields(new),
- .nonce = new.nonce,
- .csum_lo = new.csum.lo,
- .csum_hi = *((__le16 *) &new.csum.hi),
- };
- return;
- }
+ for (d = (u64 *) ptrs.start;
+ d != (u64 *) ptrs.end;
+ d++)
+ *d = swab64(*d);
- if (bch_crc_bytes[new.csum_type] <= 16 &&
- new.uncompressed_size <= CRC128_SIZE_MAX &&
- new.nonce <= CRC128_NONCE_MAX) {
- crc->crc128 = (struct bch_extent_crc128) {
- .type = 1 << BCH_EXTENT_ENTRY_crc128,
- common_fields(new),
- .nonce = new.nonce,
- .csum = new.csum,
- };
- return;
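+ /* Fix up checksum fields that aren't stored as plain u64s: */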
+ for (entry = ptrs.start;
+ entry < ptrs.end;
+ entry = extent_entry_next(entry)) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.csum = swab32(entry->crc32.csum);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
+ entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.csum.hi = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.hi);
+ entry->crc128.csum.lo = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ }
}
-#undef common_fields
- BUG();
-}
-
-void bch2_extent_crc_append(struct bkey_i_extent *e,
- struct bch_extent_crc_unpacked new)
-{
- bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), new);
- __extent_entry_push(e);
}
-static inline void __extent_entry_insert(struct bkey_i_extent *e,
- union bch_extent_entry *dst,
- union bch_extent_entry *new)
-{
- union bch_extent_entry *end = extent_entry_last(extent_i_to_s(e));
-
- memmove_u64s_up((u64 *) dst + extent_entry_u64s(new),
- dst, (u64 *) end - (u64 *) dst);
- e->k.u64s += extent_entry_u64s(new);
- memcpy_u64s_small(dst, new, extent_entry_u64s(new));
-}
+/* Generic extent code: */
-void bch2_extent_ptr_decoded_append(struct bkey_i_extent *e,
- struct extent_ptr_decoded *p)
+int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
- struct bch_extent_crc_unpacked crc = bch2_extent_crc_unpack(&e->k, NULL);
- union bch_extent_entry *pos;
- unsigned i;
-
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = e->v.start;
- goto found;
- }
-
- extent_for_each_crc(extent_i_to_s(e), crc, pos)
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = extent_entry_next(pos);
- goto found;
- }
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 sub;
- bch2_extent_crc_append(e, p->crc);
- pos = extent_entry_last(extent_i_to_s(e));
-found:
- p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- __extent_entry_insert(e, pos, to_entry(&p->ptr));
+ if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
+ return 0;
- for (i = 0; i < p->ec_nr; i++) {
- p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
- __extent_entry_insert(e, pos, to_entry(&p->ec[i]));
- }
-}
+ EBUG_ON(bkey_cmp(where, k.k->p) > 0);
-/*
- * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
- *
- * Returns true if @k should be dropped entirely
- *
- * For existing keys, only called when btree nodes are being rewritten, not when
- * they're merely being compacted/resorted in memory.
- */
-bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
-{
- struct bch_extent_ptr *ptr;
+ sub = where.offset - bkey_start_offset(k.k);
- bch2_bkey_drop_ptrs(k, ptr,
- ptr->cached &&
- ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
+ k.k->size -= sub;
- /* will only happen if all pointers were cached: */
- if (!bkey_val_u64s(k.k))
+ if (!k.k->size) {
k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
+ }
- return false;
-}
-
-void bch2_extent_mark_replicas_cached(struct bch_fs *c,
- struct bkey_s_extent e,
- unsigned target,
- unsigned nr_desired_replicas)
-{
- union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- int extra = bch2_bkey_durability(c, e.s_c) - nr_desired_replicas;
-
- if (target && extra > 0)
- extent_for_each_ptr_decode(e, p, entry) {
- int n = bch2_extent_ptr_durability(c, p);
+ switch (k.k->type) {
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v: {
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ bool seen_crc = false;
- if (n && n <= extra &&
- !bch2_dev_in_target(c, p.ptr.dev, target)) {
- entry->ptr.cached = true;
- extra -= n;
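+ /*
+  * A pointer with no preceding crc entry points directly at the
+  * data, so its offset moves with the start of the extent; pointers
+  * covered by a crc entry stay put, and the crc entry's offset into
+  * the checksummed region is bumped instead:
+  */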
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ if (!seen_crc)
+ entry->ptr.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
}
- }
-
- if (extra > 0)
- extent_for_each_ptr_decode(e, p, entry) {
- int n = bch2_extent_ptr_durability(c, p);
- if (n && n <= extra) {
- entry->ptr.cached = true;
- extra -= n;
- }
+ if (extent_entry_is_crc(entry))
+ seen_crc = true;
}
-}
-
-enum merge_result bch2_extent_merge(struct bch_fs *c,
- struct bkey_i *l, struct bkey_i *r)
-{
- struct bkey_s_extent el = bkey_i_to_s_extent(l);
- struct bkey_s_extent er = bkey_i_to_s_extent(r);
- union bch_extent_entry *en_l, *en_r;
-
- if (bkey_val_u64s(&l->k) != bkey_val_u64s(&r->k))
- return BCH_MERGE_NOMERGE;
-
- extent_for_each_entry(el, en_l) {
- struct bch_extent_ptr *lp, *rp;
- struct bch_dev *ca;
-
- en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
- if ((extent_entry_type(en_l) !=
- extent_entry_type(en_r)) ||
- !extent_entry_is_ptr(en_l))
- return BCH_MERGE_NOMERGE;
-
- lp = &en_l->ptr;
- rp = &en_r->ptr;
-
- if (lp->offset + el.k->size != rp->offset ||
- lp->dev != rp->dev ||
- lp->gen != rp->gen)
- return BCH_MERGE_NOMERGE;
-
- /* We don't allow extents to straddle buckets: */
- ca = bch_dev_bkey_exists(c, lp->dev);
-
- if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
- return BCH_MERGE_NOMERGE;
+ break;
}
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
- l->k.needs_whiteout |= r->k.needs_whiteout;
-
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
- bch2_key_resize(&l->k, KEY_SIZE_MAX);
- bch2_cut_front(l->k.p, r);
- return BCH_MERGE_PARTIAL;
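+ /* the index into the indirect extent advances by the dropped sectors: */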
+ le64_add_cpu(&p.v->idx, sub);
+ break;
}
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data: {
+ void *p = bkey_inline_data_p(k);
+ unsigned bytes = bkey_inline_data_bytes(k.k);
- bch2_key_resize(&l->k, l->k.size + r->k.size);
-
- return BCH_MERGE_MERGE;
-}
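+ /* @sub is in sectors, but inline data is sized in bytes: */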
+ sub = min_t(u64, sub << 9, bytes);
-/*
- * When merging an extent that we're inserting into a btree node, the new merged
- * extent could overlap with an existing 0 size extent - if we don't fix that,
- * it'll break the btree node iterator so this code finds those 0 size extents
- * and shifts them out of the way.
- *
- * Also unpacks and repacks.
- */
-static bool bch2_extent_merge_inline(struct bch_fs *c,
- struct btree_iter *iter,
- struct bkey_packed *l,
- struct bkey_packed *r,
- bool back_merge)
-{
- struct btree *b = iter->l[0].b;
- struct btree_node_iter *node_iter = &iter->l[0].iter;
- BKEY_PADDED(k) li, ri;
- struct bkey_packed *m = back_merge ? l : r;
- struct bkey_i *mi = back_merge ? &li.k : &ri.k;
- struct bset_tree *t = bch2_bkey_to_bset(b, m);
- enum merge_result ret;
-
- EBUG_ON(bkey_written(b, m));
-
- /*
- * We need to save copies of both l and r, because we might get a
- * partial merge (which modifies both) and then fails to repack
- */
- bch2_bkey_unpack(b, &li.k, l);
- bch2_bkey_unpack(b, &ri.k, r);
-
- ret = bch2_bkey_merge(c, &li.k, &ri.k);
- if (ret == BCH_MERGE_NOMERGE)
- return false;
+ memmove(p, p + sub, bytes - sub);
- /*
- * check if we overlap with deleted extents - would break the sort
- * order:
- */
- if (back_merge) {
- struct bkey_packed *n = bkey_next(m);
-
- if (n != btree_bkey_last(b, t) &&
- bkey_cmp_left_packed(b, n, &li.k.k.p) <= 0 &&
- bkey_deleted(n))
- return false;
- } else if (ret == BCH_MERGE_MERGE) {
- struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
-
- if (prev &&
- bkey_cmp_left_packed_byval(b, prev,
- bkey_start_pos(&li.k.k)) > 0)
- return false;
+ new_val_u64s -= sub >> 3;
+ break;
}
-
- if (ret == BCH_MERGE_PARTIAL) {
- if (!extent_i_save(b, m, mi))
- return false;
-
- if (!back_merge)
- bkey_copy(packed_to_bkey(l), &li.k);
- else
- bkey_copy(packed_to_bkey(r), &ri.k);
- } else {
- if (!extent_i_save(b, m, &li.k))
- return false;
}
- bch2_bset_fix_invalidated_key(b, m);
- bch2_btree_node_iter_fix(iter, b, node_iter,
- m, m->u64s, m->u64s);
- verify_modified_extent(iter, m);
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
- return ret == BCH_MERGE_MERGE;
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
}
-bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
- unsigned nr_replicas)
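+/*
+ * bch2_cut_back_s - trim the back of @k so that it ends at @where
+ *
+ * Returns the change in the size of the value, in u64s (always <= 0).
+ */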
+int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
- struct btree_iter iter;
- struct bpos end = pos;
- struct bkey_s_c k;
- bool ret = true;
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 len = 0;
- end.offset += size;
+ if (bkey_cmp(where, k.k->p) >= 0)
+ return 0;
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, pos,
- BTREE_ITER_SLOTS, k) {
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
- break;
+ EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
- if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
- ret = false;
- break;
- }
- }
- bch2_btree_iter_unlock(&iter);
+ len = where.offset - bkey_start_offset(k.k);
- return ret;
-}
+ k.k->p.offset = where.offset;
+ k.k->size = len;
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
-{
- unsigned ret = 0;
+ if (!len) {
+ k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
+ }
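+ /*
+  * Unlike cut_front, no pointer or crc fixup is needed when trimming
+  * the back; only inline data values change size:
+  */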
switch (k.k->type) {
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- extent_for_each_ptr_decode(e, p, entry)
- ret += !p.ptr.cached &&
- p.crc.compression_type == BCH_COMPRESSION_NONE;
- break;
- }
- case KEY_TYPE_reservation:
- ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data:
+ new_val_u64s = (bkey_inline_data_offset(k.k) +
+ min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
break;
}
- return ret;
-}
-
-/* KEY_TYPE_reservation: */
-
-const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
- return "incorrect value size";
-
- if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
- return "invalid nr_replicas";
-
- return NULL;
-}
-
-void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- pr_buf(out, "generation %u replicas %u",
- le32_to_cpu(r.v->generation),
- r.v->nr_replicas);
-}
-
-enum merge_result bch2_reservation_merge(struct bch_fs *c,
- struct bkey_i *l, struct bkey_i *r)
-{
- struct bkey_i_reservation *li = bkey_i_to_reservation(l);
- struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
-
- if (li->v.generation != ri->v.generation ||
- li->v.nr_replicas != ri->v.nr_replicas)
- return BCH_MERGE_NOMERGE;
-
- l->k.needs_whiteout |= r->k.needs_whiteout;
-
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
- bch2_key_resize(&l->k, KEY_SIZE_MAX);
- bch2_cut_front(l->k.p, r);
- return BCH_MERGE_PARTIAL;
- }
-
- bch2_key_resize(&l->k, l->k.size + r->k.size);
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
- return BCH_MERGE_MERGE;
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
}