bio_put(&rb->bio);
printbuf_exit(&buf);
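+ /*
+  * Errors were hit but corrected while reading this node (saw_error) and the
+  * read itself did not fail; schedule an asynchronous rewrite so the on-disk
+  * copy is repaired as well.
+  */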
- if (saw_error && !btree_node_read_error(b))
+ if (saw_error && !btree_node_read_error(b)) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, b->key.k.p);
+ bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
+ __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+ printbuf_exit(&buf);
+
bch2_btree_node_rewrite_async(c, b);
+ }
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
static int async_btree_node_rewrite_trans(struct btree_trans *trans,
struct async_btree_rewrite *a)
{
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
int ret;
if (ret)
goto out;
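+ /*
+  * The node may have been freed or replaced since the rewrite was queued;
+  * the sequence number saved in async_btree_rewrite lets us detect that.
+  */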
- if (!b || b->data->keys.seq != a->seq)
+ if (!b || b->data->keys.seq != a->seq) {
+ struct printbuf buf = PRINTBUF;
+
+ if (b)
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ else
+ prt_str(&buf, "(null");
+ bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s",
+ __func__, a->seq, buf.buf);
+ printbuf_exit(&buf);
goto out;
+ }
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
out:
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
struct bch_fs *c = a->c;
+ int ret;
- bch2_trans_do(c, NULL, NULL, 0,
+ ret = bch2_trans_do(c, NULL, NULL, 0,
async_btree_node_rewrite_trans(&trans, a));
+ if (ret)
+ bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
kfree(a);
}
{
struct async_btree_rewrite *a;
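+ /*
+  * The rewrite runs later from a workqueue; take a c->writes reference so the
+  * filesystem stays writeable until the work item completes and drops it.
+  */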
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+ bch_err(c, "%s: error getting c->writes ref", __func__);
return;
+ }
a = kmalloc(sizeof(*a), GFP_NOFS);
if (!a) {
bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
+ bch_err(c, "%s: error allocating memory", __func__);
return;
}