void bch2_fs_read_only(struct bch_fs *c)
{
        if (!test_bit(BCH_FS_RW, &c->flags)) {
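+                 /*
+                  * Journal reclaim is now started before the filesystem goes
+                  * fully read-write, so it may be running even if we never
+                  * made it to read-write: stop it here instead of asserting
+                  * that it was never started.
+                  */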
-                 BUG_ON(c->journal.reclaim_thread);
+                 bch2_journal_reclaim_stop(&c->journal);
                return;
        }
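+         /*
+          * Kill the writes refcount: new writers can no longer take it, so
+          * no new write I/O can start from this point on.
+          */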
        percpu_ref_kill(&c->writes);
        cancel_work_sync(&c->ec_stripe_delete_work);
-         cancel_delayed_work(&c->pd_controllers_update);
        /*
         * If we're not doing an emergency shutdown, we want to wait on
         * outstanding writes to complete so they don't see spurious errors due
         * to shutting down the allocator:
         */
        return ret;
}
-         schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
-
        schedule_work(&c->ec_stripe_delete_work);
        return 0;
             (!early || c->opts.read_only)))
                return -EROFS;
+         bch_info(c, "going read-write");
+
        ret = bch2_fs_mark_dirty(c);
        if (ret)
                goto err;
        for_each_rw_member(ca, c, i)
                bch2_wake_allocator(ca);
-         ret = bch2_journal_reclaim_start(&c->journal);
-         if (ret) {
-                 bch_err(c, "error starting journal reclaim: %i", ret);
-                 return ret;
-         }
-
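+         /*
+          * Journal reclaim no longer needs to be kicked off here: it is
+          * started earlier now, before the read-write transition completes.
+          */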
        if (!early) {
                ret = bch2_fs_read_write_late(c);
                if (ret)
                        goto err;
        }
        bch2_fs_btree_iter_exit(c);
        bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
        bch2_fs_btree_cache_exit(c);
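+         /*
+          * bch2_fs_replicas_exit() now frees the replicas tables and the
+          * usage counters that were previously freed by hand below:
+          */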
+         bch2_fs_replicas_exit(c);
        bch2_fs_journal_exit(&c->journal);
        bch2_io_clock_exit(&c->io_clock[WRITE]);
        bch2_io_clock_exit(&c->io_clock[READ]);
        bch2_journal_entries_free(&c->journal_entries);
        percpu_free_rwsem(&c->mark_lock);
        free_percpu(c->online_reserved);
-         kfree(c->usage_scratch);
-         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
-                 free_percpu(c->usage[i]);
-         kfree(c->usage_base);
        if (c->btree_iters_bufs)
                for_each_possible_cpu(cpu)
                        kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
        bioset_exit(&c->btree_bio);
        mempool_exit(&c->fill_iter);
        percpu_ref_exit(&c->writes);
-         kfree(c->replicas.entries);
-         kfree(c->replicas_gc.entries);
        kfree(rcu_dereference_protected(c->disk_groups, 1));
        kfree(c->journal_seq_blacklist_table);
        kfree(c->unused_inode_hints);
        if (c->wq)
                destroy_workqueue(c->wq);
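+         /*
+          * The superblock buffer is freed by the bch2_free_super() helper
+          * now instead of an open-coded free_pages():
+          */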
-         free_pages((unsigned long) c->disk_sb.sb,
-                    c->disk_sb.page_order);
+         bch2_free_super(&c->disk_sb);
        kvpfree(c, sizeof(*c));
        module_put(THIS_MODULE);
}
        cancel_work_sync(&ca->io_error_work);
        cancel_work_sync(&c->btree_write_error_work);
-         cancel_delayed_work_sync(&c->pd_controllers_update);
        cancel_work_sync(&c->read_only_work);
}
        /*
         * Allocator threads don't start filling copygc reserve until after we
         * set BCH_FS_STARTED - wake them now:
+          *
+          * XXX ugly hack:
+          * Need to set ca->allocator_state here instead of relying on the
+          * allocator threads to do it to avoid racing with the copygc threads
+          * checking it and thinking they have no alloc reserve:
         */
-         for_each_online_member(ca, c, i)
+         for_each_online_member(ca, c, i) {
+                 ca->allocator_state = ALLOCATOR_running;
                bch2_wake_allocator(ca);
+         }
        if (c->opts.read_only || c->opts.nochanges) {
                bch2_fs_read_only(c);
        if (!ca)
                goto err;
-         if (ca->mi.state == BCH_MEMBER_STATE_RW &&
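+         /*
+          * Set ca->fs before starting the allocator thread - the thread
+          * reads ca->fs, so it must be initialized first:
+          */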
+         ca->fs = c;
+
+         if (ca->mi.state == BCH_MEMBER_STATE_rw &&
            bch2_dev_allocator_start(ca)) {
                bch2_dev_free(ca);
                goto err;
        lockdep_assert_held(&c->state_lock);
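+         /*
+          * Note: the BCH_MEMBER_STATE_* constants are renamed to lowercase
+          * throughout this patch, matching the new bch2_member_states[]
+          * string table used to print them.
+          */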
        switch (new_state) {
-         case BCH_MEMBER_STATE_RW:
+         case BCH_MEMBER_STATE_rw:
                return true;
-         case BCH_MEMBER_STATE_RO:
-                 if (ca->mi.state != BCH_MEMBER_STATE_RW)
+         case BCH_MEMBER_STATE_ro:
+                 if (ca->mi.state != BCH_MEMBER_STATE_rw)
                        return true;
                /* do we have enough devices to write to? */
                for_each_member_device(ca2, c, i)
                        if (ca2 != ca)
-                                 nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;
+                                 nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
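+                 /*
+                  * The BCH_FORCE_IF_*_DEGRADED flags allow dropping below
+                  * the configured replica counts, but never below the
+                  * *_required minimums:
+                  */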
                required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
                               ? c->opts.metadata_replicas
                               : c->opts.metadata_replicas_required,
                               !(flags & BCH_FORCE_IF_DATA_DEGRADED)
                               ? c->opts.data_replicas
                               : c->opts.data_replicas_required);
                return nr_rw >= required;
-         case BCH_MEMBER_STATE_FAILED:
-         case BCH_MEMBER_STATE_SPARE:
-                 if (ca->mi.state != BCH_MEMBER_STATE_RW &&
-                     ca->mi.state != BCH_MEMBER_STATE_RO)
+         case BCH_MEMBER_STATE_failed:
+         case BCH_MEMBER_STATE_spare:
+                 if (ca->mi.state != BCH_MEMBER_STATE_rw &&
+                     ca->mi.state != BCH_MEMBER_STATE_ro)
                        return true;
                /* do we have enough devices to read from? */
        ca = bch_dev_locked(c, i);
        if (!bch2_dev_is_online(ca) &&
-             (ca->mi.state == BCH_MEMBER_STATE_RW ||
-              ca->mi.state == BCH_MEMBER_STATE_RO)) {
+             (ca->mi.state == BCH_MEMBER_STATE_rw ||
+              ca->mi.state == BCH_MEMBER_STATE_ro)) {
                mutex_unlock(&c->sb_lock);
                return false;
        }
{
        lockdep_assert_held(&c->state_lock);
-         BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);
+         BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
        bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
        if (!bch2_dev_state_allowed(c, ca, new_state, flags))
                return -EINVAL;
-         if (new_state != BCH_MEMBER_STATE_RW)
+         if (new_state != BCH_MEMBER_STATE_rw)
                __bch2_dev_read_only(c, ca);
-         bch_notice(ca, "%s", bch2_dev_state[new_state]);
+         bch_notice(ca, "%s", bch2_member_states[new_state]);
        mutex_lock(&c->sb_lock);
        mi = bch2_sb_get_members(c->disk_sb.sb);
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);
-         if (new_state == BCH_MEMBER_STATE_RW &&
+         if (new_state == BCH_MEMBER_STATE_rw &&
            __bch2_dev_read_write(c, ca))
                ret = -ENOMEM;
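+         /*
+          * Flush this device's alloc keys out of the btree key cache first:
+          * the range deletion below operates on the btree itself and would
+          * otherwise miss keys still living only in the key cache.
+          */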
        for (i = 0; i < ca->mi.nbuckets; i++) {
                ret = bch2_btree_key_cache_flush(&trans,
-                                 BTREE_ID_ALLOC, POS(ca->dev_idx, i));
+                                 BTREE_ID_alloc, POS(ca->dev_idx, i));
                if (ret)
                        break;
        }
        if (ret)
                return ret;
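+         /*
+          * POS(ca->dev_idx, 0)..POS(ca->dev_idx + 1, 0) is a half-open range
+          * covering every alloc key belonging to this device:
+          */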
-         return bch2_btree_delete_range(c, BTREE_ID_ALLOC,
+         return bch2_btree_delete_range(c, BTREE_ID_alloc,
                                       POS(ca->dev_idx, 0),
                                       POS(ca->dev_idx + 1, 0),
                                       NULL);
         */
        percpu_ref_put(&ca->ref);
-         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
                bch_err(ca, "Cannot remove without losing data");
                goto err;
        }
        bch2_dev_usage_journal_reserve(c);
        return 0;
err:
-         if (ca->mi.state == BCH_MEMBER_STATE_RW &&
+         if (ca->mi.state == BCH_MEMBER_STATE_rw &&
            !percpu_ref_is_zero(&ca->io_ref))
                __bch2_dev_read_write(c, ca);
        up_write(&c->state_lock);
        if (ret)
                goto err_late;
-         if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+         if (ca->mi.state == BCH_MEMBER_STATE_rw) {
                err = __bch2_dev_read_write(c, ca);
                if (err)
                        goto err_late;
                goto err;
        }
-         if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+         if (ca->mi.state == BCH_MEMBER_STATE_rw) {
                err = __bch2_dev_read_write(c, ca);
                if (err)
                        goto err;
        return 0;
}
-         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+         if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
                bch_err(ca, "Cannot offline required disk");
                up_write(&c->state_lock);
                return -EINVAL;