struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == dev)
- ptr->cached = true;
+ if (ptr->dev == dev) {
+ bch2_extent_ptr_set_cached(k, ptr);
+ return;
+ }
}
static int __bch2_data_update_index_update(struct btree_trans *trans,
return false;
}
+/*
+ * bch2_extent_ptr_set_cached - mark a data pointer within an extent as cached
+ * @k:		extent key containing @ptr (mutable)
+ * @ptr:	pointer entry within @k to mark as cached
+ *
+ * A cached pointer may not be erasure coded (the validate path rejects a
+ * cached ptr preceded by a stripe_ptr entry), so besides setting ->cached
+ * we must also drop the stripe_ptr entry covering @ptr, if there is one.
+ *
+ * Stripe pointer entries precede the data pointer they apply to, so while
+ * walking the entry list @ec tracks the most recently seen stripe_ptr that
+ * has not yet been consumed by an earlier data pointer.
+ *
+ * BUG()s if @ptr is not an entry of @k.
+ */
+void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
+{
+	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+	union bch_extent_entry *entry;
+	union bch_extent_entry *ec = NULL;	/* stripe_ptr pending for the next data ptr */
+
+	bkey_extent_entry_for_each(ptrs, entry) {
+		if (&entry->ptr == ptr) {
+			ptr->cached = true;
+			/* cached ptrs can't be erasure coded: drop its stripe_ptr */
+			if (ec)
+				extent_entry_drop(k, ec);
+			return;
+		}
+
+		if (extent_entry_is_stripe_ptr(entry))
+			ec = entry;
+		else if (extent_entry_is_ptr(entry))
+			/* that stripe_ptr belonged to this earlier ptr, not @ptr */
+			ec = NULL;
+	}
+
+	BUG();
+}
+
/*
* bch_extent_normalize - clean up an extent, dropping stale pointers etc.
*
unsigned size_ondisk = k.k->size;
unsigned nonce = UINT_MAX;
unsigned nr_ptrs = 0;
- bool unwritten = false;
+ bool unwritten = false, have_ec = false;
int ret;
if (bkey_is_btree_ptr(k.k))
return -BCH_ERR_invalid_bkey;
}
+ if (entry->ptr.cached && have_ec) {
+ prt_printf(err, "cached, erasure coded ptr");
+ return -BCH_ERR_invalid_bkey;
+ }
+
unwritten = entry->ptr.unwritten;
+ have_ec = false;
nr_ptrs++;
break;
case BCH_EXTENT_ENTRY_crc32:
}
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
+ have_ec = true;
break;
}
}
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
bool bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s_c);
+void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
+
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);