#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
+#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "chardev.h"
#include "io.h"
#include "journal.h"
#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
return c;
}
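+/*
+ * Each journal entry reserves space for one jset_entry_dev_usage per member
+ * device; recompute the reservation when devices come or go:
+ */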
+static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i, nr = 0, u64s =
+ (sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR) /
+ sizeof(u64);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ nr++;
+ rcu_read_unlock();
+
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->dev_usage_journal_res, u64s * nr);
+}
+
/* Filesystem RO/RW: */
/*
static void __bch2_fs_read_only(struct bch_fs *c)
{
struct bch_dev *ca;
- bool wrote;
unsigned i, clean_passes = 0;
- int ret;
bch2_rebalance_stop(c);
-
- for_each_member_device(ca, c, i)
- bch2_copygc_stop(ca);
-
+ bch2_copygc_stop(c);
bch2_gc_thread_stop(c);
/*
*/
bch2_journal_flush_all_pins(&c->journal);
+ /*
+ * If the allocator threads didn't all start up, the btree updates to
+ * write out alloc info aren't going to work:
+ */
if (!test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags))
- goto allocator_not_running;
+ goto nowrote_alloc;
- do {
- ret = bch2_stripes_write(c, &wrote);
- if (ret) {
- bch2_fs_inconsistent(c, "error writing out stripes");
- break;
- }
+ bch_verbose(c, "flushing journal and stopping allocators");
- ret = bch2_alloc_write(c, false, &wrote);
- if (ret) {
- bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
- break;
- }
+ bch2_journal_flush_all_pins(&c->journal);
+ set_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
- for_each_member_device(ca, c, i)
- bch2_dev_allocator_quiesce(c, ca);
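+ /* Repeat until two passes in a row flush nothing: */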
+ do {
+ clean_passes++;
- bch2_journal_flush_all_pins(&c->journal);
+ if (bch2_journal_flush_all_pins(&c->journal))
+ clean_passes = 0;
/*
- * We need to explicitly wait on btree interior updates to complete
- * before stopping the journal, flushing all journal pins isn't
- * sufficient, because in the BTREE_INTERIOR_UPDATING_ROOT case btree
- * interior updates have to drop their journal pin before they're
- * fully complete:
+ * In-flight interior btree updates will generate more journal
+ * updates and btree updates (alloc btree):
*/
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c));
+ if (bch2_btree_interior_updates_nr_pending(c)) {
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+ clean_passes = 0;
+ }
+ flush_work(&c->btree_interior_update_work);
- clean_passes = wrote ? 0 : clean_passes + 1;
+ if (bch2_journal_flush_all_pins(&c->journal))
+ clean_passes = 0;
} while (clean_passes < 2);
-allocator_not_running:
+ bch_verbose(c, "flushing journal and stopping allocators complete");
+
+ set_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
+nowrote_alloc:
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+ flush_work(&c->btree_interior_update_work);
+
for_each_member_device(ca, c, i)
bch2_dev_allocator_stop(ca);
clear_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
+ clear_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
bch2_fs_journal_stop(&c->journal);
- /* XXX: mark super that alloc info is persistent */
-
/*
* the journal kicks off btree writes via reclaim - wait for in-flight
* writes after stopping the journal:
*/
- if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- bch2_btree_flush_all_writes(c);
- else
- bch2_btree_verify_flushed(c);
+ bch2_btree_flush_all_writes(c);
/*
* After stopping journal:
void bch2_fs_read_only(struct bch_fs *c)
{
if (!test_bit(BCH_FS_RW, &c->flags)) {
- cancel_delayed_work_sync(&c->journal.reclaim_work);
+ bch2_journal_reclaim_stop(&c->journal);
return;
}
*/
percpu_ref_kill(&c->writes);
- cancel_delayed_work(&c->pd_controllers_update);
+ cancel_work_sync(&c->ec_stripe_delete_work);
/*
* If we're not doing an emergency shutdown, we want to wait on
if (!bch2_journal_error(&c->journal) &&
!test_bit(BCH_FS_ERROR, &c->flags) &&
!test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
- test_bit(BCH_FS_STARTED, &c->flags))
+ test_bit(BCH_FS_STARTED, &c->flags) &&
+ test_bit(BCH_FS_ALLOC_CLEAN, &c->flags) &&
+ !c->opts.norecovery) {
+ bch_verbose(c, "marking filesystem clean");
bch2_fs_mark_clean(c);
+ }
clear_bit(BCH_FS_RW, &c->flags);
}
struct bch_fs *c =
container_of(work, struct bch_fs, read_only_work);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
static void bch2_fs_read_only_async(struct bch_fs *c)
{
bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
- bch2_fs_read_only_async(c);
bch2_journal_halt(&c->journal);
+ bch2_fs_read_only_async(c);
wake_up(&bch_read_only_wait);
return ret;
static int bch2_fs_read_write_late(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
int ret;
ret = bch2_gc_thread_start(c);
return ret;
}
- for_each_rw_member(ca, c, i) {
- ret = bch2_copygc_start(c, ca);
- if (ret) {
- bch_err(c, "error starting copygc threads");
- percpu_ref_put(&ca->io_ref);
- return ret;
- }
+ ret = bch2_copygc_start(c);
+ if (ret) {
+ bch_err(c, "error starting copygc thread");
+ return ret;
}
ret = bch2_rebalance_start(c);
return ret;
}
- schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
+ schedule_work(&c->ec_stripe_delete_work);
return 0;
}
-int __bch2_fs_read_write(struct bch_fs *c, bool early)
+static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
struct bch_dev *ca;
unsigned i;
if (test_bit(BCH_FS_RW, &c->flags))
return 0;
+ /*
+ * nochanges is used for fsck -n mode - we have to allow going rw
+ * during recovery for that to work:
+ */
+ if (c->opts.norecovery ||
+ (c->opts.nochanges &&
+ (!early || c->opts.read_only)))
+ return -EROFS;
+
+ bch_info(c, "going read-write");
+
ret = bch2_fs_mark_dirty(c);
if (ret)
goto err;
+ /*
+ * We need to write out a journal entry before we start doing btree
+ * updates, to ensure that on unclean shutdown new journal blacklist
+ * entries are created:
+ */
+ bch2_journal_meta(&c->journal);
+
+ clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
+
for_each_rw_member(ca, c, i)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- if (!test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
- ret = bch2_fs_allocator_start(c);
- if (ret) {
- bch_err(c, "error initializing allocator");
- goto err;
- }
-
- set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
- }
-
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {
set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
+ for_each_rw_member(ca, c, i)
+ bch2_wake_allocator(ca);
+
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
percpu_ref_reinit(&c->writes);
set_bit(BCH_FS_RW, &c->flags);
-
- queue_delayed_work(c->journal_reclaim_wq,
- &c->journal.reclaim_work, 0);
return 0;
err:
__bch2_fs_read_only(c);
/* Filesystem startup/shutdown: */
-static void bch2_fs_free(struct bch_fs *c)
+static void __bch2_fs_free(struct bch_fs *c)
{
unsigned i;
+ int cpu;
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
bch2_fs_ec_exit(c);
bch2_fs_encryption_exit(c);
bch2_fs_io_exit(c);
+ bch2_fs_btree_interior_update_exit(c);
+ bch2_fs_btree_iter_exit(c);
+ bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
bch2_fs_btree_cache_exit(c);
+ bch2_fs_replicas_exit(c);
bch2_fs_journal_exit(&c->journal);
bch2_io_clock_exit(&c->io_clock[WRITE]);
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
+ bch2_journal_keys_free(&c->journal_keys);
+ bch2_journal_entries_free(&c->journal_entries);
percpu_free_rwsem(&c->mark_lock);
- kfree(c->usage_scratch);
- free_percpu(c->usage[0]);
+ free_percpu(c->online_reserved);
+
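+ /*
+ * Each CPU may hold a cached btree iterator allocation; free those
+ * before the percpu buffer itself:
+ */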
+ if (c->btree_iters_bufs)
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
+
+ free_percpu(c->btree_iters_bufs);
free_percpu(c->pcpu);
- mempool_exit(&c->btree_iters_pool);
+ mempool_exit(&c->large_bkey_pool);
mempool_exit(&c->btree_bounce_pool);
bioset_exit(&c->btree_bio);
- mempool_exit(&c->btree_interior_update_pool);
- mempool_exit(&c->btree_reserve_pool);
mempool_exit(&c->fill_iter);
percpu_ref_exit(&c->writes);
- kfree(c->replicas.entries);
- kfree(c->replicas_gc.entries);
kfree(rcu_dereference_protected(c->disk_groups, 1));
+ kfree(c->journal_seq_blacklist_table);
+ kfree(c->unused_inode_hints);
+ free_heap(&c->copygc_heap);
- if (c->journal_reclaim_wq)
- destroy_workqueue(c->journal_reclaim_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
if (c->wq)
destroy_workqueue(c->wq);
- free_pages((unsigned long) c->disk_sb.sb,
- c->disk_sb.page_order);
+ bch2_free_super(&c->disk_sb);
kvpfree(c, sizeof(*c));
module_put(THIS_MODULE);
}
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
- bch2_fs_free(c);
+ __bch2_fs_free(c);
}
-void bch2_fs_stop(struct bch_fs *c)
+void __bch2_fs_stop(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
bch_verbose(c, "shutting down");
+ set_bit(BCH_FS_STOPPING, &c->flags);
+
+ cancel_work_sync(&c->journal_seq_blacklist_gc_work);
+
+ down_write(&c->state_lock);
+ bch2_fs_read_only(c);
+ up_write(&c->state_lock);
+
for_each_member_device(ca, c, i)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
kobject_put(&c->opts_dir);
kobject_put(&c->internal);
- mutex_lock(&bch_fs_list_lock);
- list_del(&c->list);
- mutex_unlock(&bch_fs_list_lock);
-
- closure_sync(&c->cl);
- closure_debug_destroy(&c->cl);
-
- mutex_lock(&c->state_lock);
- bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
-
/* btree prefetch might have kicked off reads in the background: */
bch2_btree_flush_all_reads(c);
cancel_work_sync(&ca->io_error_work);
cancel_work_sync(&c->btree_write_error_work);
- cancel_delayed_work_sync(&c->pd_controllers_update);
cancel_work_sync(&c->read_only_work);
+}
- for (i = 0; i < c->sb.nr_devices; i++)
- if (c->devs[i])
- bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));
+void bch2_fs_free(struct bch_fs *c)
+{
+ unsigned i;
+
+ mutex_lock(&bch_fs_list_lock);
+ list_del(&c->list);
+ mutex_unlock(&bch_fs_list_lock);
+
+ closure_sync(&c->cl);
+ closure_debug_destroy(&c->cl);
+
+ for (i = 0; i < c->sb.nr_devices; i++) {
+ struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
+
+ if (ca) {
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_free(ca);
+ }
+ }
bch_verbose(c, "shutdown complete");
kobject_put(&c->kobj);
}
+void bch2_fs_stop(struct bch_fs *c)
+{
+ __bch2_fs_stop(c);
+ bch2_fs_free(c);
+}
+
static const char *bch2_fs_online(struct bch_fs *c)
{
struct bch_dev *ca;
bch2_opts_create_sysfs_files(&c->opts_dir))
return "error creating sysfs objects";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
err = "error creating sysfs objects";
__for_each_member_device(ca, c, i, NULL)
list_add(&c->list, &bch_fs_list);
err = NULL;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return err;
}
__module_get(THIS_MODULE);
+ closure_init(&c->cl, NULL);
+
+ c->kobj.kset = bcachefs_kset;
+ kobject_init(&c->kobj, &bch2_fs_ktype);
+ kobject_init(&c->internal, &bch2_fs_internal_ktype);
+ kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
+ kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
+
c->minor = -1;
c->disk_sb.fs_sb = true;
- mutex_init(&c->state_lock);
+ init_rwsem(&c->state_lock);
mutex_init(&c->sb_lock);
mutex_init(&c->replicas_gc_lock);
mutex_init(&c->btree_root_lock);
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
+ bch2_fs_copygc_init(c);
+ bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
bch2_fs_allocator_background_init(c);
bch2_fs_allocator_foreground_init(c);
bch2_fs_rebalance_init(c);
INIT_LIST_HEAD(&c->list);
- INIT_LIST_HEAD(&c->btree_interior_update_list);
- mutex_init(&c->btree_reserve_cache_lock);
- mutex_init(&c->btree_interior_update_lock);
-
mutex_init(&c->usage_scratch_lock);
mutex_init(&c->bio_bounce_pages_lock);
spin_lock_init(&c->btree_write_error_lock);
INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);
+ INIT_WORK(&c->journal_seq_blacklist_gc_work,
+ bch2_blacklist_entries_gc);
+
+ INIT_LIST_HEAD(&c->journal_entries);
+ INIT_LIST_HEAD(&c->journal_iters);
+
INIT_LIST_HEAD(&c->fsck_errors);
mutex_init(&c->fsck_error_lock);
- INIT_LIST_HEAD(&c->ec_new_stripe_list);
- mutex_init(&c->ec_new_stripe_lock);
- mutex_init(&c->ec_stripe_create_lock);
+ INIT_LIST_HEAD(&c->ec_stripe_head_list);
+ mutex_init(&c->ec_stripe_head_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_new_list);
+ mutex_init(&c->ec_stripe_new_lock);
+
spin_lock_init(&c->ec_stripes_heap_lock);
seqcount_init(&c->gc_pos_lock);
+ seqcount_init(&c->usage_lock);
+
c->copy_gc_enabled = 1;
c->rebalance.enabled = 1;
c->promote_whole_extents = true;
bch2_fs_btree_cache_init_early(&c->btree_cache);
+ mutex_init(&c->sectors_available_lock);
+
if (percpu_init_rwsem(&c->mark_lock))
goto err;
c->block_bits = ilog2(c->opts.block_size);
c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
- c->opts.nochanges |= c->opts.noreplay;
- c->opts.read_only |= c->opts.nochanges;
-
if (bch2_fs_init_fault("fs_alloc"))
goto err;
- iter_size = sizeof(struct btree_node_iter_large) +
+ iter_size = sizeof(struct sort_iter) +
(btree_blocks(c) + 1) * 2 *
- sizeof(struct btree_node_iter_set);
+ sizeof(struct sort_iter_set);
+
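+ /* Shard unused_inode_hints by CPU, rounded up to a power of two: */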
+ c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
if (!(c->wq = alloc_workqueue("bcachefs",
- WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
- !(c->copygc_wq = alloc_workqueue("bcache_copygc",
- WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
- !(c->journal_reclaim_wq = alloc_workqueue("bcache_journal",
- WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
+ !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
- mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
- sizeof(struct btree_reserve)) ||
- mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
- sizeof(struct btree_update)) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
bioset_init(&c->btree_bio, 1,
max(offsetof(struct btree_read_bio, bio),
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+ !(c->online_reserved = alloc_percpu(u64)) ||
+ !(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
- mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
- sizeof(struct btree_iter) * BTREE_ITER_MAX +
- sizeof(struct btree_insert_entry) *
- (BTREE_ITER_MAX + 4)) ||
+ mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
+ !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
+ sizeof(u64), GFP_KERNEL)) ||
bch2_io_clock_init(&c->io_clock[READ]) ||
bch2_io_clock_init(&c->io_clock[WRITE]) ||
bch2_fs_journal_init(&c->journal) ||
bch2_fs_replicas_init(c) ||
bch2_fs_btree_cache_init(c) ||
+ bch2_fs_btree_key_cache_init(&c->btree_key_cache) ||
+ bch2_fs_btree_iter_init(c) ||
+ bch2_fs_btree_interior_update_init(c) ||
bch2_fs_io_init(c) ||
bch2_fs_encryption_init(c) ||
bch2_fs_compress_init(c) ||
bch2_dev_alloc(c, i))
goto err;
- /*
- * Now that all allocations have succeeded, init various refcounty
- * things that let us shutdown:
- */
- closure_init(&c->cl, NULL);
-
- c->kobj.kset = bcachefs_kset;
- kobject_init(&c->kobj, &bch2_fs_ktype);
- kobject_init(&c->internal, &bch2_fs_internal_ktype);
- kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
- kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
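+ /*
+ * Reserve space in each journal entry for btree roots, per-device
+ * usage, and clock entries:
+ */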
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->btree_root_journal_res,
+ BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
+ bch2_dev_usage_journal_reserve(c);
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->clock_journal_res,
+ (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
mutex_lock(&bch_fs_list_lock);
err = bch2_fs_online(c);
goto out;
}
-const char *bch2_fs_start(struct bch_fs *c)
+noinline_for_stack
+static void print_mount_opts(struct bch_fs *c)
+{
+ enum bch_opt_id i;
+ char buf[512];
+ struct printbuf p = PBUF(buf);
+ bool first = true;
+
+ strcpy(buf, "(null)");
+
+ if (c->opts.read_only) {
+ pr_buf(&p, "ro");
+ first = false;
+ }
+
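+ /* Print only mount options that differ from their defaults: */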
+ for (i = 0; i < bch2_opts_nr; i++) {
+ const struct bch_option *opt = &bch2_opt_table[i];
+ u64 v = bch2_opt_get_by_id(&c->opts, i);
+
+ if (!(opt->mode & OPT_MOUNT))
+ continue;
+
+ if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
+ continue;
+
+ if (!first)
+ pr_buf(&p, ",");
+ first = false;
+ bch2_opt_to_text(&p, c, opt, v, OPT_SHOW_MOUNT_STYLE);
+ }
+
+ bch_info(c, "mounted with opts: %s", buf);
+}
+
+int bch2_fs_start(struct bch_fs *c)
{
const char *err = "cannot allocate memory";
struct bch_sb_field_members *mi;
unsigned i;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
goto err;
err = "dynamic fault";
+ ret = -EINVAL;
if (bch2_fs_init_fault("fs_start"))
goto err;
- if (c->opts.read_only) {
+ set_bit(BCH_FS_STARTED, &c->flags);
+
+ /*
+ * Allocator threads don't start filling copygc reserve until after we
+ * set BCH_FS_STARTED - wake them now:
+ *
+ * XXX ugly hack:
+ * Need to set ca->allocator_state here instead of relying on the
+ * allocator threads to do it to avoid racing with the copygc threads
+ * checking it and thinking they have no alloc reserve:
+ */
+ for_each_online_member(ca, c, i) {
+ ca->allocator_state = ALLOCATOR_running;
+ bch2_wake_allocator(ca);
+ }
+
+ if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c);
} else {
- if (!test_bit(BCH_FS_RW, &c->flags)
- ? bch2_fs_read_write(c)
- : bch2_fs_read_write_late(c)) {
- err = "error going read write";
+ err = "error going read write";
+ ret = !test_bit(BCH_FS_RW, &c->flags)
+ ? bch2_fs_read_write(c)
+ : bch2_fs_read_write_late(c);
+ if (ret)
goto err;
- }
}
- set_bit(BCH_FS_STARTED, &c->flags);
-
- err = NULL;
+ print_mount_opts(c);
+ ret = 0;
out:
- mutex_unlock(&c->state_lock);
- return err;
+ up_write(&c->state_lock);
+ return ret;
err:
switch (ret) {
case BCH_FSCK_ERRORS_NOT_FIXED:
break;
}
- BUG_ON(!err);
+ if (ret >= 0)
+ ret = -EIO;
goto out;
}
static void bch2_dev_free(struct bch_dev *ca)
{
+ bch2_dev_allocator_stop(ca);
+
cancel_work_sync(&ca->io_error_work);
if (ca->kobj.state_in_sysfs &&
free_percpu(ca->io_done);
bioset_exit(&ca->replica_set);
bch2_dev_buckets_free(ca);
- kfree(ca->sb_read_scratch);
+ free_page((unsigned long) ca->sb_read_scratch);
bch2_time_stats_exit(&ca->io_latency[WRITE]);
bch2_time_stats_exit(&ca->io_latency[READ]);
init_rwsem(&ca->bucket_lock);
- writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);
-
- spin_lock_init(&ca->freelist_lock);
- bch2_dev_copygc_init(ca);
-
INIT_WORK(&ca->io_error_work, bch2_io_error_work);
bch2_time_stats_init(&ca->io_latency[READ]);
0, GFP_KERNEL) ||
percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
- !(ca->sb_read_scratch = kmalloc(4096, GFP_KERNEL)) ||
+ !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
bioset_init(&ca->replica_set, 4,
offsetof(struct bch_write_bio, bio), 0) ||
if (!ca)
goto err;
+ ca->fs = c;
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw &&
+ bch2_dev_allocator_start(ca)) {
+ bch2_dev_free(ca);
+ goto err;
+ }
+
bch2_dev_attach(c, ca, dev_idx);
out:
pr_verbose_init(c->opts, "ret %i", ret);
if (ret)
return ret;
- if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
- !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
- mutex_lock(&c->sb_lock);
- bch2_mark_dev_superblock(ca->fs, ca, 0);
- mutex_unlock(&c->sb_lock);
- }
-
bch2_dev_sysfs_online(c, ca);
if (c->sb.nr_devices == 1)
enum bch_member_state new_state, int flags)
{
struct bch_devs_mask new_online_devs;
- struct replicas_status s;
struct bch_dev *ca2;
int i, nr_rw = 0, required;
lockdep_assert_held(&c->state_lock);
switch (new_state) {
- case BCH_MEMBER_STATE_RW:
+ case BCH_MEMBER_STATE_rw:
return true;
- case BCH_MEMBER_STATE_RO:
- if (ca->mi.state != BCH_MEMBER_STATE_RW)
+ case BCH_MEMBER_STATE_ro:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw)
return true;
/* do we have enough devices to write to? */
for_each_member_device(ca2, c, i)
if (ca2 != ca)
- nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;
+ nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
? c->opts.metadata_replicas
: c->opts.data_replicas_required);
return nr_rw >= required;
- case BCH_MEMBER_STATE_FAILED:
- case BCH_MEMBER_STATE_SPARE:
- if (ca->mi.state != BCH_MEMBER_STATE_RW &&
- ca->mi.state != BCH_MEMBER_STATE_RO)
+ case BCH_MEMBER_STATE_failed:
+ case BCH_MEMBER_STATE_spare:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw &&
+ ca->mi.state != BCH_MEMBER_STATE_ro)
return true;
/* do we have enough devices to read from? */
new_online_devs = bch2_online_devs(c);
__clear_bit(ca->dev_idx, new_online_devs.d);
- s = __bch2_replicas_status(c, new_online_devs);
-
- return bch2_have_enough_devs(s, flags);
+ return bch2_have_enough_devs(c, new_online_devs, flags, false);
default:
BUG();
}
static bool bch2_fs_may_start(struct bch_fs *c)
{
- struct replicas_status s;
struct bch_sb_field_members *mi;
struct bch_dev *ca;
- unsigned i, flags = c->opts.degraded
- ? BCH_FORCE_IF_DEGRADED
- : 0;
+ unsigned i, flags = 0;
+
+ if (c->opts.very_degraded)
+ flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;
- if (!c->opts.degraded) {
+ if (c->opts.degraded)
+ flags |= BCH_FORCE_IF_DEGRADED;
+
+ if (!c->opts.degraded &&
+ !c->opts.very_degraded) {
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb.sb);
ca = bch_dev_locked(c, i);
if (!bch2_dev_is_online(ca) &&
- (ca->mi.state == BCH_MEMBER_STATE_RW ||
- ca->mi.state == BCH_MEMBER_STATE_RO)) {
+ (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ ca->mi.state == BCH_MEMBER_STATE_ro)) {
mutex_unlock(&c->sb_lock);
return false;
}
mutex_unlock(&c->sb_lock);
}
- s = bch2_replicas_status(c);
-
- return bch2_have_enough_devs(s, flags);
+ return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
- bch2_copygc_stop(ca);
+ /*
+ * A device going read-only means the copygc reserve gets smaller, so we
+ * don't want that happening while copygc is in progress:
+ */
+ bch2_copygc_stop(c);
/*
* The allocator thread itself allocates btree nodes, so stop it first:
bch2_dev_allocator_stop(ca);
bch2_dev_allocator_remove(c, ca);
bch2_dev_journal_stop(&c->journal, ca);
+
+ bch2_copygc_start(c);
}
static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
lockdep_assert_held(&c->state_lock);
- BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);
+ BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
if (bch2_dev_allocator_start(ca))
return "error starting allocator thread";
- if (bch2_copygc_start(c, ca))
- return "error starting copygc thread";
-
return NULL;
}
if (!bch2_dev_state_allowed(c, ca, new_state, flags))
return -EINVAL;
- if (new_state != BCH_MEMBER_STATE_RW)
+ if (new_state != BCH_MEMBER_STATE_rw)
__bch2_dev_read_only(c, ca);
- bch_notice(ca, "%s", bch2_dev_state[new_state]);
+ bch_notice(ca, "%s", bch2_member_states[new_state]);
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb.sb);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- if (new_state == BCH_MEMBER_STATE_RW &&
+ if (new_state == BCH_MEMBER_STATE_rw &&
__bch2_dev_read_write(c, ca))
ret = -ENOMEM;
{
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = __bch2_dev_set_state(c, ca, new_state, flags);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
/* Device add/removal: */
+int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct btree_trans trans;
+ size_t i;
+ int ret = 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
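+ /*
+ * Flush this device's alloc keys out of the btree key cache before
+ * deleting them from the alloc btree:
+ */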
+ for (i = 0; i < ca->mi.nbuckets; i++) {
+ ret = bch2_btree_key_cache_flush(&trans,
+ BTREE_ID_alloc, POS(ca->dev_idx, i));
+ if (ret)
+ break;
+ }
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ return ret;
+
+ return bch2_btree_delete_range(c, BTREE_ID_alloc,
+ POS(ca->dev_idx, 0),
+ POS(ca->dev_idx + 1, 0),
+ NULL);
+}
+
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
struct bch_sb_field_members *mi;
unsigned dev_idx = ca->dev_idx, data;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
- percpu_ref_put(&ca->ref); /* XXX */
+ /*
+ * We consume a reference to ca->ref, regardless of whether we succeed
+ * or fail:
+ */
+ percpu_ref_put(&ca->ref);
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");
goto err;
}
__bch2_dev_read_only(c, ca);
- /*
- * XXX: verify that dev_idx is really not in use anymore, anywhere
- *
- * flag_data_bad() does not check btree pointers
- */
ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
if (ret) {
bch_err(ca, "Remove failed: error %i dropping data", ret);
goto err;
}
- data = bch2_dev_has_data(c, ca);
- if (data) {
- char data_has_str[100];
-
- bch2_flags_to_text(&PBUF(data_has_str),
- bch2_data_types, data);
- bch_err(ca, "Remove failed, still has data (%s)", data_has_str);
- ret = -EBUSY;
- goto err;
- }
-
- ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC,
- POS(ca->dev_idx, 0),
- POS(ca->dev_idx + 1, 0),
- NULL);
+ ret = bch2_dev_remove_alloc(c, ca);
if (ret) {
bch_err(ca, "Remove failed, error deleting alloc info");
goto err;
* (overwritten) keys that point to the device we're removing:
*/
bch2_journal_flush_all_pins(&c->journal);
+ /*
+ * Hack to ensure bch2_replicas_gc2() clears out entries for this device
+ */
+ bch2_journal_meta(&c->journal);
ret = bch2_journal_error(&c->journal);
if (ret) {
bch_err(ca, "Remove failed, journal error");
goto err;
}
+ ret = bch2_replicas_gc2(c);
+ if (ret) {
+ bch_err(ca, "Remove failed: error %i from replicas gc", ret);
+ goto err;
+ }
+
+ data = bch2_dev_has_data(c, ca);
+ if (data) {
+ char data_has_str[100];
+
+ bch2_flags_to_text(&PBUF(data_has_str),
+ bch2_data_types, data);
+ bch_err(ca, "Remove failed, still has data (%s)", data_has_str);
+ ret = -EBUSY;
+ goto err;
+ }
+
__bch2_dev_offline(c, ca);
mutex_lock(&c->sb_lock);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
+
+ bch2_dev_usage_journal_reserve(c);
return 0;
err:
- if (ca->mi.state == BCH_MEMBER_STATE_RW &&
+ if (ca->mi.state == BCH_MEMBER_STATE_rw &&
!percpu_ref_is_zero(&ca->io_ref))
__bch2_dev_read_write(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
-static void dev_usage_clear(struct bch_dev *ca)
-{
- struct bucket_array *buckets;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct bch_dev_usage *p =
- per_cpu_ptr(ca->usage[0], cpu);
- memset(p, 0, sizeof(*p));
- }
-
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- memset(buckets->b, 0, sizeof(buckets->b[0]) * buckets->nbuckets);
- up_read(&ca->bucket_lock);
-}
-
/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
* allocate the journal, reset all the marks, then remark after we
* attach...
*/
- bch2_mark_dev_superblock(ca->fs, ca, 0);
+ bch2_mark_dev_superblock(NULL, ca, 0);
err = "journal alloc failed";
ret = bch2_dev_journal_alloc(ca);
if (ret)
goto err;
- dev_usage_clear(ca);
-
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
mutex_lock(&c->sb_lock);
err = "insufficient space in new superblock";
ca->disk_sb.sb->dev_idx = dev_idx;
bch2_dev_attach(c, ca, dev_idx);
- bch2_mark_dev_superblock(c, ca, 0);
-
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+ bch2_dev_usage_journal_reserve(c);
+
+ err = "error marking superblock";
+ ret = bch2_trans_mark_dev_sb(c, NULL, ca);
+ if (ret)
+ goto err_late;
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err_late;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err_unlock:
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err:
if (ca)
bch2_dev_free(ca);
bch_err(c, "Unable to add device: %s", err);
return ret;
err_late:
+ up_write(&c->state_lock);
bch_err(c, "Error going rw after adding device: %s", err);
return -EINVAL;
}
const char *err;
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = bch2_read_super(path, &opts, &sb);
if (ret) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
}
ca = bch_dev_locked(c, dev_idx);
- if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+
+ if (bch2_trans_mark_dev_sb(c, NULL, ca)) {
+ err = "bch2_trans_mark_dev_sb() error";
+ goto err;
+ }
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err;
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
bch2_free_super(&sb);
bch_err(c, "error bringing %s online: %s", path, err);
return -EINVAL;
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (!bch2_dev_is_online(ca)) {
bch_err(ca, "Already offline");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot offline required disk");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return -EINVAL;
}
__bch2_dev_offline(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
struct bch_member *mi;
int ret = 0;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (nbuckets < ca->mi.nbuckets) {
bch_err(ca, "Cannot shrink yet");
bch2_recalc_capacity(c);
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path)
{
-
struct bch_dev *ca;
dev_t dev;
unsigned i;
{
struct bch_sb_handle *sb = NULL;
struct bch_fs *c = NULL;
+ struct bch_sb_field_members *mi;
unsigned i, best_sb = 0;
const char *err;
int ret = -ENOMEM;
le64_to_cpu(sb[best_sb].sb->seq))
best_sb = i;
- for (i = 0; i < nr_devices; i++) {
+ mi = bch2_sb_get_members(sb[best_sb].sb);
+
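+ /*
+ * Skip devices that the newest superblock says have been removed
+ * from the filesystem:
+ */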
+ i = 0;
+ while (i < nr_devices) {
+ if (i != best_sb &&
+ !bch2_dev_exists(sb[best_sb].sb, mi, sb[i].sb->dev_idx)) {
+ pr_info("%pg has been removed, skipping", sb[i].bdev);
+ bch2_free_super(&sb[i]);
+ array_remove_item(sb, nr_devices, i);
+ continue;
+ }
+
err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
if (err)
goto err_print;
+ i++;
}
ret = -ENOMEM;
goto err;
err = "bch2_dev_online() error";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
for (i = 0; i < nr_devices; i++)
if (bch2_dev_attach_bdev(c, &sb[i])) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
goto err_print;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err = "insufficient devices";
if (!bch2_fs_may_start(c))
goto err_print;
if (!c->opts.nostart) {
- err = bch2_fs_start(c);
- if (err)
- goto err_print;
+ ret = bch2_fs_start(c);
+ if (ret)
+ goto err;
}
out:
kfree(sb);
const char *err;
struct bch_fs *c;
bool allocated_fs = false;
+ int ret;
err = bch2_sb_validate(sb);
if (err)
mutex_unlock(&c->sb_lock);
if (!c->opts.nostart && bch2_fs_may_start(c)) {
- err = bch2_fs_start(c);
- if (err)
+ err = "error starting filesystem";
+ ret = bch2_fs_start(c);
+ if (ret)
goto err;
}
bch2_debug_exit();
bch2_vfs_exit();
bch2_chardev_exit();
+ bch2_btree_key_cache_exit();
if (bcachefs_kset)
kset_unregister(bcachefs_kset);
}
static int __init bcachefs_init(void)
{
bch2_bkey_pack_test();
- bch2_inode_pack_test();
if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
+ bch2_btree_key_cache_init() ||
bch2_chardev_init() ||
bch2_vfs_init() ||
bch2_debug_init())