heap_verify_backpointer(c, idx);
- if (stripe_idx_to_delete(c) >= 0 &&
- !percpu_ref_is_dying(&c->writes))
- schedule_work(&c->ec_stripe_delete_work);
+ if (stripe_idx_to_delete(c) >= 0)
+ bch2_do_stripe_deletes(c);
}
/* stripe deletion */
if (ec_stripe_delete(c, idx))
break;
}
+
+ percpu_ref_put(&c->writes);
+}
+
+/*
+ * Kick off background deletion of empty stripes.
+ *
+ * Takes a ref on c->writes (via percpu_ref_tryget_live()) so the
+ * filesystem cannot go read-only while the work item is pending; the
+ * work function (ec_stripe_delete_work) drops that ref when it finishes.
+ * If the work item was already queued, schedule_work() returns false and
+ * we drop the ref immediately — exactly one ref is held per queued work.
+ * If c->writes is already dying (going RO), nothing is scheduled.
+ */
+void bch2_do_stripe_deletes(struct bch_fs *c)
+{
+ if (percpu_ref_tryget_live(&c->writes) &&
+ !schedule_work(&c->ec_stripe_delete_work))
+ percpu_ref_put(&c->writes);
}
/* stripe creation: */
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);
+void bch2_do_stripe_deletes(struct bch_fs *);
+
void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_ec_flush_new_stripes(struct bch_fs *);
*/
percpu_ref_kill(&c->writes);
- cancel_work_sync(&c->ec_stripe_delete_work);
-
/*
* If we're not doing an emergency shutdown, we want to wait on
* outstanding writes to complete so they don't see spurious errors due
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- bch2_do_discards(c);
- bch2_do_invalidates(c);
-
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
percpu_ref_reinit(&c->writes);
set_bit(BCH_FS_RW, &c->flags);
set_bit(BCH_FS_WAS_RW, &c->flags);
+
+ bch2_do_discards(c);
+ bch2_do_invalidates(c);
+ bch2_do_stripe_deletes(c);
return 0;
err:
__bch2_fs_read_only(c);