// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "chardev.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "migrate.h"
#include "movinggc.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
#include "trace.h"

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
#include <crypto/hash.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

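/*
 * KTYPE(type) is sysfs boilerplate: it expands to the attribute_group, the
 * NULL terminated groups array and the kobj_type for one sysfs object type,
 * wiring up the type ## _files, type ## _sysfs_ops and type ## _release
 * symbols defined elsewhere. E.g. KTYPE(bch2_fs) below defines bch2_fs_ktype
 * from bch2_fs_files, bch2_fs_sysfs_ops and bch2_fs_release.
 */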
#define KTYPE(type)							\
static const struct attribute_group type ## _group = {			\
	.attrs = type ## _files						\
};									\
									\
static const struct attribute_group *type ## _groups[] = {		\
	&type ## _group,						\
	NULL								\
};									\
									\
static const struct kobj_type type ## _ktype = {			\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_groups = type ## _groups				\
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);

static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);

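/*
 * Look up the filesystem a block device belongs to. On success this takes a
 * ref on the filesystem's closure, which the caller must drop - a minimal
 * usage sketch:
 *
 *	struct bch_fs *c = bch2_dev_to_fs(bdev->bd_dev);
 *	if (c)
 *		closure_put(&c->cl);
 */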
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
			if (ca->disk_sb.bdev->bd_dev == dev) {
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return c;
}

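/*
 * Resize the journal's standing reservation for device usage entries: one
 * jset_entry_dev_usage (with a jset_entry_dev_usage_type per data type) per
 * member device.
 */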
static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, u64s =
		((sizeof(struct jset_entry_dev_usage) +
		  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
		sizeof(u64);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		nr++;
	rcu_read_unlock();

	bch2_journal_entry_res_resize(&c->journal,
			&c->dev_usage_journal_res, u64s * nr);
}

/* Filesystem RO/RW: */

/*
 * For startup/shutdown of RW stuff, the dependencies are:
 *
 * - foreground writes depend on copygc and rebalance (to free up space)
 *
 * - copygc and rebalance depend on mark and sweep gc (they actually probably
 *   don't because they either reserve ahead of time or don't block if
 *   allocations fail, but allocations can require mark and sweep gc to run
 *   because of generation number wraparound)
 *
 * - all of the above depends on the allocator threads
 *
 * - allocator depends on the journal (when it rewrites prios and gens)
 */

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, clean_passes = 0;

	bch2_rebalance_stop(c);
	bch2_copygc_stop(c);
	bch2_gc_thread_stop(c);

	/*
	 * Flush journal before stopping allocators, because flushing journal
	 * blacklist entries involves allocating new btree nodes:
	 */
	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * If the allocator threads didn't all start up, the btree updates to
	 * write out alloc info aren't going to work:
	 */
	if (!test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags))
		goto nowrote_alloc;

	bch_verbose(c, "flushing journal and stopping allocators");

	bch2_journal_flush_all_pins(&c->journal);
	set_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);

	do {
		clean_passes++;

		if (bch2_journal_flush_all_pins(&c->journal))
			clean_passes = 0;

		/*
		 * In flight interior btree updates will generate more journal
		 * updates and btree updates (alloc btree):
		 */
		if (bch2_btree_interior_updates_nr_pending(c)) {
			closure_wait_event(&c->btree_interior_update_wait,
				!bch2_btree_interior_updates_nr_pending(c));
			clean_passes = 0;
		}
		flush_work(&c->btree_interior_update_work);

		if (bch2_journal_flush_all_pins(&c->journal))
			clean_passes = 0;
	} while (clean_passes < 2);
	bch_verbose(c, "flushing journal and stopping allocators complete");

	set_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
nowrote_alloc:
	closure_wait_event(&c->btree_interior_update_wait,
			   !bch2_btree_interior_updates_nr_pending(c));
	flush_work(&c->btree_interior_update_work);

	for_each_member_device(ca, c, i)
		bch2_dev_allocator_stop(ca);

	clear_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
	clear_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);

	bch2_fs_journal_stop(&c->journal);

	/*
	 * the journal kicks off btree writes via reclaim - wait for in flight
	 * writes after stopping journal:
	 */
	bch2_btree_flush_all_writes(c);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(ca, c, i)
		bch2_dev_allocator_remove(c, ca);
}

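/* Completion for percpu_ref_kill(&c->writes) - the last writer went away: */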
static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
	wake_up(&bch_read_only_wait);
}

void bch2_fs_read_only(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_RW, &c->flags)) {
		BUG_ON(c->journal.reclaim_thread);
		return;
	}

	BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 *
	 * (This is really blocking new _allocations_, writes to previously
	 * allocated space can still happen until stopping the allocator in
	 * bch2_dev_allocator_stop()).
	 */
	percpu_ref_kill(&c->writes);

	cancel_work_sync(&c->ec_stripe_delete_work);

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown outstanding writes may
	 * hang until we shutdown the allocator so we don't want to wait
	 * on outstanding writes before shutting everything down - but
	 * we do need to wait on them before returning and signalling
	 * that going RO is complete:
	 */
	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
		   test_bit(BCH_FS_EMERGENCY_RO, &c->flags));

	__bch2_fs_read_only(c);

	wait_event(bch_read_only_wait,
		   test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));

	clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);

	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
	    test_bit(BCH_FS_STARTED, &c->flags) &&
	    test_bit(BCH_FS_ALLOC_CLEAN, &c->flags) &&
	    !c->opts.norecovery) {
		bch_verbose(c, "marking filesystem clean");
		bch2_fs_mark_clean(c);
	}

	clear_bit(BCH_FS_RW, &c->flags);
}

static void bch2_fs_read_only_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, read_only_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
}

static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}

bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);

	bch2_journal_halt(&c->journal);
	bch2_fs_read_only_async(c);

	wake_up(&bch_read_only_wait);
	return ret;
}

static int bch2_fs_read_write_late(struct bch_fs *c)
{
	int ret;

	ret = bch2_gc_thread_start(c);
	if (ret) {
		bch_err(c, "error starting gc thread");
		return ret;
	}

	ret = bch2_copygc_start(c);
	if (ret) {
		bch_err(c, "error starting copygc thread");
		return ret;
	}

	ret = bch2_rebalance_start(c);
	if (ret) {
		bch_err(c, "error starting rebalance thread");
		return ret;
	}

	schedule_work(&c->ec_stripe_delete_work);

	return 0;
}

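/*
 * @early: set when going RW during recovery, before the full complement of
 * background threads is wanted - gc, copygc and rebalance are then started
 * later, via bch2_fs_read_write_late():
 */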
static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	if (test_bit(BCH_FS_RW, &c->flags))
		return 0;

	/*
	 * nochanges is used for fsck -n mode - we have to allow going rw
	 * during recovery for that to work:
	 */
	if (c->opts.norecovery ||
	    (c->opts.nochanges &&
	     (!early || c->opts.read_only)))
		return -EROFS;

	ret = bch2_fs_mark_dirty(c);
	if (ret)
		goto err;

	/*
	 * We need to write out a journal entry before we start doing btree
	 * updates, to ensure that on unclean shutdown new journal blacklist
	 * entries are created:
	 */
	bch2_journal_meta(&c->journal);

	clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		if (ret) {
			bch_err(c, "error starting allocator threads");
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);

	for_each_rw_member(ca, c, i)
		bch2_wake_allocator(ca);

	ret = bch2_journal_reclaim_start(&c->journal);
	if (ret) {
		bch_err(c, "error starting journal reclaim: %i", ret);
		return ret;
	}

	if (!early) {
		ret = bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	percpu_ref_reinit(&c->writes);
	set_bit(BCH_FS_RW, &c->flags);
	return 0;
err:
	__bch2_fs_read_only(c);
	return ret;
}

int bch2_fs_read_write(struct bch_fs *c)
{
	return __bch2_fs_read_write(c, false);
}

int bch2_fs_read_write_early(struct bch_fs *c)
{
	lockdep_assert_held(&c->state_lock);

	return __bch2_fs_read_write(c, true);
}

/* Filesystem startup/shutdown: */

static void __bch2_fs_free(struct bch_fs *c)
{
	unsigned i;
	int cpu;

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_exit(&c->times[i]);

	bch2_fs_quota_exit(c);
	bch2_fs_fsio_exit(c);
	bch2_fs_ec_exit(c);
	bch2_fs_encryption_exit(c);
	bch2_fs_io_exit(c);
	bch2_fs_btree_interior_update_exit(c);
	bch2_fs_btree_iter_exit(c);
	bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
	bch2_fs_btree_cache_exit(c);
	bch2_fs_journal_exit(&c->journal);
	bch2_io_clock_exit(&c->io_clock[WRITE]);
	bch2_io_clock_exit(&c->io_clock[READ]);
	bch2_fs_compress_exit(c);
	bch2_journal_keys_free(&c->journal_keys);
	bch2_journal_entries_free(&c->journal_entries);
	percpu_free_rwsem(&c->mark_lock);
	free_percpu(c->online_reserved);
	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);

	if (c->btree_iters_bufs)
		for_each_possible_cpu(cpu)
			kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);

	free_percpu(c->btree_iters_bufs);
	free_percpu(c->pcpu);
	mempool_exit(&c->large_bkey_pool);
	mempool_exit(&c->btree_bounce_pool);
	bioset_exit(&c->btree_bio);
	mempool_exit(&c->fill_iter);
	percpu_ref_exit(&c->writes);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);
	kfree(rcu_dereference_protected(c->disk_groups, 1));
	kfree(c->journal_seq_blacklist_table);
	kfree(c->unused_inode_hints);
	free_heap(&c->copygc_heap);

	if (c->copygc_wq)
		destroy_workqueue(c->copygc_wq);
	if (c->wq)
		destroy_workqueue(c->wq);

	free_pages((unsigned long) c->disk_sb.sb,
		   c->disk_sb.page_order);
	kvpfree(c, sizeof(*c));
	module_put(THIS_MODULE);
}

static void bch2_fs_release(struct kobject *kobj)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	__bch2_fs_free(c);
}

void __bch2_fs_stop(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_STOPPING, &c->flags);

	cancel_work_sync(&c->journal_seq_blacklist_gc_work);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(ca, c, i)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(ca, c, i)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->btree_write_error_work);
	cancel_work_sync(&c->read_only_work);
}

void bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	for (i = 0; i < c->sb.nr_devices; i++) {
		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

		if (ca) {
			bch2_free_super(&ca->disk_sb);
			bch2_dev_free(ca);
		}
	}

	bch_verbose(c, "shutdown complete");

	kobject_put(&c->kobj);
}

void bch2_fs_stop(struct bch_fs *c)
{
	__bch2_fs_stop(c);
	bch2_fs_free(c);
}

static const char *bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	const char *err = NULL;
	unsigned i;
	int ret;

	lockdep_assert_held(&bch_fs_list_lock);

	if (!list_empty(&c->list))
		return NULL;

	if (__bch2_uuid_to_fs(c->sb.uuid))
		return "filesystem UUID already open";

	ret = bch2_fs_chardev_init(c);
	if (ret)
		return "error creating character device";

	bch2_fs_debug_init(c);

	if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
	    kobject_add(&c->internal, &c->kobj, "internal") ||
	    kobject_add(&c->opts_dir, &c->kobj, "options") ||
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ||
	    bch2_opts_create_sysfs_files(&c->opts_dir))
		return "error creating sysfs objects";

	down_write(&c->state_lock);

	err = "error creating sysfs objects";
	__for_each_member_device(ca, c, i, NULL)
		if (bch2_dev_sysfs_online(c, ca))
			goto err;

	list_add(&c->list, &bch_fs_list);
	err = NULL;
err:
	up_write(&c->state_lock);
	return err;
}

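/*
 * Allocate and partially initialize a new filesystem instance: member block
 * devices are attached afterwards, via bch2_dev_attach_bdev(). Returns NULL
 * on allocation failure.
 */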
static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
	struct bch_sb_field_members *mi;
	struct bch_fs *c;
	unsigned i, iter_size;
	const char *err;

	pr_verbose_init(opts, "");

	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c)
		goto out;

	__module_get(THIS_MODULE);

	closure_init(&c->cl, NULL);

	c->kobj.kset = bcachefs_kset;
	kobject_init(&c->kobj, &bch2_fs_ktype);
	kobject_init(&c->internal, &bch2_fs_internal_ktype);
	kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
	kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);

	c->minor = -1;
	c->disk_sb.fs_sb = true;

	init_rwsem(&c->state_lock);
	mutex_init(&c->sb_lock);
	mutex_init(&c->replicas_gc_lock);
	mutex_init(&c->btree_root_lock);
	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);

	init_rwsem(&c->gc_lock);

	for (i = 0; i < BCH_TIME_STAT_NR; i++)
		bch2_time_stats_init(&c->times[i]);

	bch2_fs_copygc_init(c);
	bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
	bch2_fs_allocator_background_init(c);
	bch2_fs_allocator_foreground_init(c);
	bch2_fs_rebalance_init(c);
	bch2_fs_quota_init(c);

	INIT_LIST_HEAD(&c->list);

	mutex_init(&c->usage_scratch_lock);

	mutex_init(&c->bio_bounce_pages_lock);

	bio_list_init(&c->btree_write_error_list);
	spin_lock_init(&c->btree_write_error_lock);
	INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);

	INIT_WORK(&c->journal_seq_blacklist_gc_work,
		  bch2_blacklist_entries_gc);

	INIT_LIST_HEAD(&c->journal_entries);
	INIT_LIST_HEAD(&c->journal_iters);

	INIT_LIST_HEAD(&c->fsck_errors);
	mutex_init(&c->fsck_error_lock);

	INIT_LIST_HEAD(&c->ec_stripe_head_list);
	mutex_init(&c->ec_stripe_head_lock);

	INIT_LIST_HEAD(&c->ec_stripe_new_list);
	mutex_init(&c->ec_stripe_new_lock);

	spin_lock_init(&c->ec_stripes_heap_lock);

	seqcount_init(&c->gc_pos_lock);

	seqcount_init(&c->usage_lock);

	c->copy_gc_enabled	 = 1;
	c->rebalance.enabled	 = 1;
	c->promote_whole_extents = true;

	c->journal.write_time	= &c->times[BCH_TIME_journal_write];
	c->journal.delay_time	= &c->times[BCH_TIME_journal_delay];
	c->journal.blocked_time	= &c->times[BCH_TIME_blocked_journal];
	c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];

	bch2_fs_btree_cache_init_early(&c->btree_cache);

	mutex_init(&c->sectors_available_lock);

	if (percpu_init_rwsem(&c->mark_lock))
		goto err;

	mutex_lock(&c->sb_lock);

	if (bch2_sb_to_fs(c, sb)) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	mutex_unlock(&c->sb_lock);

	scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);

	c->opts = bch2_opts_default;
	bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb));
	bch2_opts_apply(&c->opts, opts);

	c->block_bits = ilog2(c->opts.block_size);
	c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);

	if (bch2_fs_init_fault("fs_alloc"))
		goto err;

	iter_size = sizeof(struct sort_iter) +
		(btree_blocks(c) + 1) * 2 *
		sizeof(struct sort_iter_set);

	c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

	if (!(c->wq = alloc_workqueue("bcachefs",
				WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
	    !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
	    percpu_ref_init(&c->writes, bch2_writes_disabled,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->btree_bio, 1,
			max(offsetof(struct btree_read_bio, bio),
			    offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
	    !(c->online_reserved = alloc_percpu(u64)) ||
	    !(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					btree_bytes(c)) ||
	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
					      sizeof(u64), GFP_KERNEL)) ||
	    bch2_io_clock_init(&c->io_clock[READ]) ||
	    bch2_io_clock_init(&c->io_clock[WRITE]) ||
	    bch2_fs_journal_init(&c->journal) ||
	    bch2_fs_replicas_init(c) ||
	    bch2_fs_btree_cache_init(c) ||
	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ||
	    bch2_fs_btree_iter_init(c) ||
	    bch2_fs_btree_interior_update_init(c) ||
	    bch2_fs_io_init(c) ||
	    bch2_fs_encryption_init(c) ||
	    bch2_fs_compress_init(c) ||
	    bch2_fs_ec_init(c) ||
	    bch2_fs_fsio_init(c))
		goto err;

	mi = bch2_sb_get_members(c->disk_sb.sb);
	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&
		    bch2_dev_alloc(c, i))
			goto err;

	bch2_journal_entry_res_resize(&c->journal,
			&c->btree_root_journal_res,
			BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
	bch2_dev_usage_journal_reserve(c);
	bch2_journal_entry_res_resize(&c->journal,
			&c->clock_journal_res,
			(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);

	mutex_lock(&bch_fs_list_lock);
	err = bch2_fs_online(c);
	mutex_unlock(&bch_fs_list_lock);
	if (err) {
		bch_err(c, "bch2_fs_online() error: %s", err);
		goto err;
	}
out:
	pr_verbose_init(opts, "ret %i", c ? 0 : -ENOMEM);
	return c;
err:
	bch2_fs_free(c);
	c = NULL;
	goto out;
}

noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
	enum bch_opt_id i;
	char buf[512];
	struct printbuf p = PBUF(buf);
	bool first = true;

	strcpy(buf, "(null)");

	if (c->opts.read_only) {
		pr_buf(&p, "ro");
		first = false;
	}

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->mode & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		if (!first)
			pr_buf(&p, ",");
		first = false;
		bch2_opt_to_text(&p, c, opt, v, OPT_SHOW_MOUNT_STYLE);
	}

	bch_info(c, "mounted with opts: %s", buf);
}

int bch2_fs_start(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	time64_t now = ktime_get_real_seconds();
	unsigned i;
	int ret = -EINVAL;

	down_write(&c->state_lock);

	BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));

	mutex_lock(&c->sb_lock);

	for_each_online_member(ca, c, i)
		bch2_sb_from_fs(c, ca);

	mi = bch2_sb_get_members(c->disk_sb.sb);
	for_each_online_member(ca, c, i)
		mi->members[ca->dev_idx].last_mount = cpu_to_le64(now);

	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
		? bch2_fs_recovery(c)
		: bch2_fs_initialize(c);
	if (ret)
		goto err;

	ret = bch2_opts_check_may_set(c);
	if (ret)
		goto err;

	err = "dynamic fault";
	ret = -EINVAL;
	if (bch2_fs_init_fault("fs_start"))
		goto err;

	set_bit(BCH_FS_STARTED, &c->flags);

	/*
	 * Allocator threads don't start filling copygc reserve until after we
	 * set BCH_FS_STARTED - wake them now:
	 *
	 * XXX ugly hack:
	 * Need to set ca->allocator_state here instead of relying on the
	 * allocator threads to do it to avoid racing with the copygc threads
	 * checking it and thinking they have no alloc reserve:
	 */
	for_each_online_member(ca, c, i) {
		ca->allocator_state = ALLOCATOR_running;
		bch2_wake_allocator(ca);
	}

	if (c->opts.read_only || c->opts.nochanges) {
		bch2_fs_read_only(c);
	} else {
		err = "error going read write";
		ret = !test_bit(BCH_FS_RW, &c->flags)
			? bch2_fs_read_write(c)
			: bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	print_mount_opts(c);
	ret = 0;
out:
	up_write(&c->state_lock);
	return ret;
err:
	switch (ret) {
	case BCH_FSCK_ERRORS_NOT_FIXED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("mount with -o fix_errors to repair\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_UNIMPLEMENTED:
		bch_err(c, "filesystem contains errors: please report this to the developers");
		pr_cont("repair unimplemented: inform the developers so that it can be added\n");
		err = "fsck error";
		break;
	case BCH_FSCK_REPAIR_IMPOSSIBLE:
		bch_err(c, "filesystem contains errors, but repair impossible");
		err = "fsck error";
		break;
	case BCH_FSCK_UNKNOWN_VERSION:
		err = "unknown metadata version";
		break;
	case -ENOMEM:
		err = "cannot allocate memory";
		break;
	case -EIO:
		err = "IO error";
		break;
	}

	if (ret >= 0)
		ret = -EIO;
	goto out;
}

static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
	struct bch_sb_field_members *sb_mi;

	sb_mi = bch2_sb_get_members(sb);
	if (!sb_mi)
		return "Invalid superblock: member info area missing";

	if (le16_to_cpu(sb->block_size) != c->opts.block_size)
		return "mismatched block size";

	if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
		return "new cache bucket size is too small";

	return NULL;
}

static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
	struct bch_sb *newest =
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

	if (!uuid_equal(&fs->uuid, &sb->uuid))
		return "device not a member of filesystem";

	if (!bch2_dev_exists(newest, mi, sb->dev_idx))
		return "device has been removed";

	if (fs->block_size != sb->block_size)
		return "mismatched block size";

	return NULL;
}

/* Device startup/shutdown: */

static void bch2_dev_release(struct kobject *kobj)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);

	kfree(ca);
}

static void bch2_dev_free(struct bch_dev *ca)
{
	bch2_dev_allocator_stop(ca);

	cancel_work_sync(&ca->io_error_work);

	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev)
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bioset_exit(&ca->replica_set);
	bch2_dev_buckets_free(ca);
	free_page((unsigned long) ca->sb_read_scratch);

	bch2_time_stats_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
	percpu_ref_exit(&ca->ref);
	kobject_put(&ca->kobj);
}

static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	if (ca->kobj.state_in_sysfs) {
		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}

static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}

static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);

	complete(&ca->io_ref_completion);
}

static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
{
	int ret;

	if (!c->kobj.state_in_sysfs)
		return 0;

	if (!ca->kobj.state_in_sysfs) {
		ret = kobject_add(&ca->kobj, &c->kobj,
				  "dev-%u", ca->dev_idx);
		if (ret)
			return ret;
	}

	if (ca->disk_sb.bdev) {
		struct kobject *block = bdev_kobj(ca->disk_sb.bdev);

		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
		if (ret)
			return ret;

		ret = sysfs_create_link(&ca->kobj, block, "block");
		if (ret)
			return ret;
	}

	return 0;
}

static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_init(&ca->io_latency[READ]);
	bch2_time_stats_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);
	ca->uuid = member->uuid;

	if (opt_defined(c->opts, discard))
		ca->mi.discard = opt_get(c->opts, discard);

	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
			    0, GFP_KERNEL) ||
	    percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    bioset_init(&ca->replica_set, 4,
			offsetof(struct bch_write_bio, bio), 0) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}

static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
			    unsigned dev_idx)
{
	ca->dev_idx = dev_idx;
	__set_bit(ca->dev_idx, ca->self.d);
	scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);

	ca->fs = c;
	rcu_assign_pointer(c->devs[ca->dev_idx], ca);

	if (bch2_dev_sysfs_online(c, ca))
		pr_warn("error creating sysfs objects");
}

static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member *member =
		bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx;
	struct bch_dev *ca = NULL;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, member);
	if (!ca)
		goto err;

	ca->fs = c;

	if (ca->mi.state == BCH_MEMBER_STATE_RW &&
	    bch2_dev_allocator_start(ca)) {
		bch2_dev_free(ca);
		goto err;
	}

	bch2_dev_attach(c, ca, dev_idx);
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	ret = -ENOMEM;
	goto out;
}

static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
{
	int ret;

	if (bch2_dev_is_online(ca)) {
		bch_err(ca, "already have device online in slot %u",
			sb->sb->dev_idx);
		return -EINVAL;
	}

	if (get_capacity(sb->bdev->bd_disk) <
	    ca->mi.bucket_size * ca->mi.nbuckets) {
		bch_err(ca, "cannot online: device too small");
		return -EINVAL;
	}

	BUG_ON(!percpu_ref_is_zero(&ca->io_ref));

	ret = bch2_dev_journal_init(ca, sb->sb);
	if (ret)
		return ret;

	/* Commit: */
	ca->disk_sb = *sb;
	memset(sb, 0, sizeof(*sb));

	percpu_ref_reinit(&ca->io_ref);

	return 0;
}

static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);

	ca = bch_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	bch2_dev_sysfs_online(c, ca);

	if (c->sb.nr_devices == 1)
		snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
	snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);

	rebalance_wakeup(c);
	return 0;
}

/* Device management: */

/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_RW:
		return true;
	case BCH_MEMBER_STATE_RO:
		if (ca->mi.state != BCH_MEMBER_STATE_RW)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(ca2, c, i)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;

		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : c->opts.metadata_replicas_required,
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : c->opts.data_replicas_required);

		return nr_rw >= required;
	case BCH_MEMBER_STATE_FAILED:
	case BCH_MEMBER_STATE_SPARE:
		if (ca->mi.state != BCH_MEMBER_STATE_RW &&
		    ca->mi.state != BCH_MEMBER_STATE_RO)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		return bch2_have_enough_devs(c, new_online_devs, flags, false);
	default:
		BUG();
	}
}

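/*
 * Do we have enough devices online to start, given the degraded/very_degraded
 * mount options? Without them, every RW or RO member must be present:
 */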
static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	unsigned i, flags = 0;

	if (c->opts.very_degraded)
		flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

	if (c->opts.degraded)
		flags |= BCH_FORCE_IF_DEGRADED;

	if (!c->opts.degraded &&
	    !c->opts.very_degraded) {
		mutex_lock(&c->sb_lock);
		mi = bch2_sb_get_members(c->disk_sb.sb);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, mi, i))
				continue;

			ca = bch_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_RW ||
			     ca->mi.state == BCH_MEMBER_STATE_RO)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}

static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	/*
	 * Device going read only means the copygc reserve gets smaller, so we
	 * don't want that happening while copygc is in progress:
	 */
	bch2_copygc_stop(c);

	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_stop(ca);
	bch2_dev_allocator_remove(c, ca);
	bch2_dev_journal_stop(&c->journal, ca);

	bch2_copygc_start(c);
}

static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	if (bch2_dev_allocator_start(ca))
		return "error starting allocator thread";

	return NULL;
}

int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_sb_field_members *mi;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -EINVAL;

	if (new_state != BCH_MEMBER_STATE_RW)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_dev_state[new_state]);

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb.sb);
	SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (new_state == BCH_MEMBER_STATE_RW &&
	    __bch2_dev_read_write(c, ca))
		ret = -ENOMEM;

	rebalance_wakeup(c);

	return ret;
}

int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
		       enum bch_member_state new_state, int flags)
{
	int ret;

	down_write(&c->state_lock);
	ret = __bch2_dev_set_state(c, ca, new_state, flags);
	up_write(&c->state_lock);

	return ret;
}

/* Device add/removal: */

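/*
 * Flush any of this device's alloc keys still sitting in the btree key cache,
 * then delete its entire range in the alloc btree:
 */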
int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	size_t i;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for (i = 0; i < ca->mi.nbuckets; i++) {
		ret = bch2_btree_key_cache_flush(&trans,
				BTREE_ID_ALLOC, POS(ca->dev_idx, i));
		if (ret)
			break;
	}
	bch2_trans_exit(&trans);

	if (ret)
		return ret;

	return bch2_btree_delete_range(c, BTREE_ID_ALLOC,
				       POS(ca->dev_idx, 0),
				       POS(ca->dev_idx + 1, 0),
				       NULL);
}

1c6fdbd8
KO
1465int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
1466{
1467 struct bch_sb_field_members *mi;
1468 unsigned dev_idx = ca->dev_idx, data;
1469 int ret = -EINVAL;
1470
1ada1606 1471 down_write(&c->state_lock);
1c6fdbd8 1472
31ba2cd3
KO
1473 /*
1474 * We consume a reference to ca->ref, regardless of whether we succeed
1475 * or fail:
1476 */
1477 percpu_ref_put(&ca->ref);
1c6fdbd8
KO
1478
1479 if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
1480 bch_err(ca, "Cannot remove without losing data");
1481 goto err;
1482 }
1483
1484 __bch2_dev_read_only(c, ca);
1485
1c6fdbd8
KO
1486 ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
1487 if (ret) {
1488 bch_err(ca, "Remove failed: error %i dropping data", ret);
1489 goto err;
1490 }
1491
1492 ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
1493 if (ret) {
1494 bch_err(ca, "Remove failed: error %i flushing journal", ret);
1495 goto err;
1496 }
1497
5d20ba48 1498 ret = bch2_dev_remove_alloc(c, ca);
1c6fdbd8
KO
1499 if (ret) {
1500 bch_err(ca, "Remove failed, error deleting alloc info");
1501 goto err;
1502 }
1503
1504 /*
1505 * must flush all existing journal entries, they might have
1506 * (overwritten) keys that point to the device we're removing:
1507 */
1508 bch2_journal_flush_all_pins(&c->journal);
31ba2cd3
KO
1509 /*
1510 * hack to ensure bch2_replicas_gc2() clears out entries to this device
1511 */
1512 bch2_journal_meta(&c->journal);
1c6fdbd8
KO
1513 ret = bch2_journal_error(&c->journal);
1514 if (ret) {
1515 bch_err(ca, "Remove failed, journal error");
1516 goto err;
1517 }
1518
31ba2cd3
KO
1519 ret = bch2_replicas_gc2(c);
1520 if (ret) {
1521 bch_err(ca, "Remove failed: error %i from replicas gc", ret);
1522 goto err;
1523 }
1524
1525 data = bch2_dev_has_data(c, ca);
1526 if (data) {
1527 char data_has_str[100];
1528
1529 bch2_flags_to_text(&PBUF(data_has_str),
1530 bch2_data_types, data);
1531 bch_err(ca, "Remove failed, still has data (%s)", data_has_str);
1532 ret = -EBUSY;
1533 goto err;
1534 }
1535
1c6fdbd8
KO
1536 __bch2_dev_offline(c, ca);
1537
1538 mutex_lock(&c->sb_lock);
1539 rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
1540 mutex_unlock(&c->sb_lock);
1541
1542 percpu_ref_kill(&ca->ref);
1543 wait_for_completion(&ca->ref_completion);
1544
1545 bch2_dev_free(ca);
1546
1547 /*
1548 * Free this device's slot in the bch_member array - all pointers to
1549 * this device must be gone:
1550 */
1551 mutex_lock(&c->sb_lock);
1552 mi = bch2_sb_get_members(c->disk_sb.sb);
1553 memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));
1554
1555 bch2_write_super(c);
1556
1557 mutex_unlock(&c->sb_lock);
1ada1606 1558 up_write(&c->state_lock);
180fb49d
KO
1559
1560 bch2_dev_usage_journal_reserve(c);
1c6fdbd8
KO
1561 return 0;
1562err:
d3bb629d
KO
1563 if (ca->mi.state == BCH_MEMBER_STATE_RW &&
1564 !percpu_ref_is_zero(&ca->io_ref))
1c6fdbd8 1565 __bch2_dev_read_write(c, ca);
1ada1606 1566 up_write(&c->state_lock);
1c6fdbd8
KO
1567 return ret;
1568}
1569
/* Add new device to running filesystem: */
int bch2_dev_add(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb;
	const char *err;
	struct bch_dev *ca = NULL;
	struct bch_sb_field_members *mi;
	struct bch_member dev_mi;
	unsigned dev_idx, nr_devices, u64s;
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	if (ret)
		return ret;

	err = bch2_sb_validate(&sb);
	if (err)
		return -EINVAL;

	dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];

	err = bch2_dev_may_add(sb.sb, c);
	if (err)
		return -EINVAL;

	ca = __bch2_dev_alloc(c, &dev_mi);
	if (!ca) {
		bch2_free_super(&sb);
		return -ENOMEM;
	}

	ret = __bch2_dev_attach_bdev(ca, &sb);
	if (ret) {
		bch2_dev_free(ca);
		return ret;
	}

	/*
	 * We want to allocate journal on the new device before adding the new
	 * device to the filesystem because allocating after we attach requires
	 * spinning up the allocator thread, and the allocator thread requires
	 * doing btree writes, which if the existing devices are RO isn't going
	 * to work
	 *
	 * So we have to mark where the superblocks are, but marking allocated
	 * data normally updates the filesystem usage too, so we have to mark,
	 * allocate the journal, reset all the marks, then remark after we
	 * attach...
	 */
	bch2_mark_dev_superblock(NULL, ca, 0);

	err = "journal alloc failed";
	ret = bch2_dev_journal_alloc(ca);
	if (ret)
		goto err;

	down_write(&c->state_lock);
	mutex_lock(&c->sb_lock);

	err = "insufficient space in new superblock";
	ret = bch2_sb_from_fs(c, ca);
	if (ret)
		goto err_unlock;

	mi = bch2_sb_get_members(ca->disk_sb.sb);

	if (!bch2_sb_resize_members(&ca->disk_sb,
				le32_to_cpu(mi->field.u64s) +
				sizeof(dev_mi) / sizeof(u64))) {
		ret = -ENOSPC;
		goto err_unlock;
	}

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;

	mi = bch2_sb_get_members(c->disk_sb.sb);
	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
			goto have_slot;
no_slot:
	err = "no slots available in superblock";
	ret = -ENOSPC;
	goto err_unlock;

have_slot:
	nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
	u64s = (sizeof(struct bch_sb_field_members) +
		sizeof(struct bch_member) * nr_devices) / sizeof(u64);

	err = "no space in superblock for member info";
	ret = -ENOSPC;

	mi = bch2_sb_resize_members(&c->disk_sb, u64s);
	if (!mi)
		goto err_unlock;

	/* success: */

	mi->members[dev_idx] = dev_mi;
	mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_real_seconds());
	c->disk_sb.sb->nr_devices = nr_devices;

	ca->disk_sb.sb->dev_idx = dev_idx;
	bch2_dev_attach(c, ca, dev_idx);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch2_dev_usage_journal_reserve(c);

	err = "error marking superblock";
	ret = bch2_trans_mark_dev_sb(c, NULL, ca);
	if (ret)
		goto err_late;

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = __bch2_dev_read_write(c, ca);
		if (err)
			goto err_late;
	}

	up_write(&c->state_lock);
	return 0;

err_unlock:
	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);
err:
	if (ca)
		bch2_dev_free(ca);
	bch2_free_super(&sb);
	bch_err(c, "Unable to add device: %s", err);
	return ret;
err_late:
	up_write(&c->state_lock);
	bch_err(c, "Error going rw after adding device: %s", err);
	return -EINVAL;
}

/* Hot add existing device to running filesystem: */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb = { NULL };
	struct bch_sb_field_members *mi;
	struct bch_dev *ca;
	unsigned dev_idx;
	const char *err;
	int ret;

	down_write(&c->state_lock);

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		up_write(&c->state_lock);
		return ret;
	}

	dev_idx = sb.sb->dev_idx;

	err = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
	if (err)
		goto err;

	if (bch2_dev_attach_bdev(c, &sb)) {
		err = "bch2_dev_attach_bdev() error";
		goto err;
	}

	ca = bch_dev_locked(c, dev_idx);

	if (bch2_trans_mark_dev_sb(c, NULL, ca)) {
		err = "bch2_trans_mark_dev_sb() error";
		goto err;
	}

	if (ca->mi.state == BCH_MEMBER_STATE_RW) {
		err = __bch2_dev_read_write(c, ca);
		if (err)
			goto err;
	}

	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb.sb);

	mi->members[ca->dev_idx].last_mount =
		cpu_to_le64(ktime_get_real_seconds());

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	up_write(&c->state_lock);
	return 0;
err:
	up_write(&c->state_lock);
	bch2_free_super(&sb);
	bch_err(c, "error bringing %s online: %s", path, err);
	return -EINVAL;
}

int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	down_write(&c->state_lock);

	if (!bch2_dev_is_online(ca)) {
		bch_err(ca, "Already offline");
		up_write(&c->state_lock);
		return 0;
	}

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
		bch_err(ca, "Cannot offline required disk");
		up_write(&c->state_lock);
		return -EINVAL;
	}

	__bch2_dev_offline(c, ca);

	up_write(&c->state_lock);
	return 0;
}

int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bch_member *mi;
	int ret = 0;

	down_write(&c->state_lock);

	if (nbuckets < ca->mi.nbuckets) {
		bch_err(ca, "Cannot shrink yet");
		ret = -EINVAL;
		goto err;
	}

	if (bch2_dev_is_online(ca) &&
	    get_capacity(ca->disk_sb.bdev->bd_disk) <
	    ca->mi.bucket_size * nbuckets) {
		bch_err(ca, "New size larger than device");
		ret = -EINVAL;
		goto err;
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	if (ret) {
		bch_err(ca, "Resize error: %i", ret);
		goto err;
	}

	mutex_lock(&c->sb_lock);
	mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
	mi->nbuckets = cpu_to_le64(nbuckets);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	bch2_recalc_capacity(c);
err:
	up_write(&c->state_lock);
	return ret;
}

/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path)
{
	struct bch_dev *ca;
	dev_t dev;
	unsigned i;
	int ret;

	ret = lookup_bdev(path, &dev);
	if (ret)
		return ERR_PTR(ret);

	for_each_member_device(ca, c, i)
		if (ca->disk_sb.bdev->bd_dev == dev)
			goto found;

	ca = ERR_PTR(-ENOENT);
found:
	return ca;
}

/* Filesystem open: */

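/*
 * Open a filesystem from a set of member devices: read and validate every
 * superblock, treat the one with the newest seq as canonical, attach each
 * device, and - unless opts.nostart is set - start the filesystem.
 */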
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
			    struct bch_opts opts)
{
	struct bch_sb_handle *sb = NULL;
	struct bch_fs *c = NULL;
	struct bch_sb_field_members *mi;
	unsigned i, best_sb = 0;
	const char *err;
	int ret = -ENOMEM;

	pr_verbose_init(opts, "");

	if (!nr_devices) {
		c = ERR_PTR(-EINVAL);
		goto out2;
	}

	if (!try_module_get(THIS_MODULE)) {
		c = ERR_PTR(-ENODEV);
		goto out2;
	}

	sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto err;

	for (i = 0; i < nr_devices; i++) {
		ret = bch2_read_super(devices[i], &opts, &sb[i]);
		if (ret)
			goto err;

		err = bch2_sb_validate(&sb[i]);
		if (err)
			goto err_print;
	}

	for (i = 1; i < nr_devices; i++)
		if (le64_to_cpu(sb[i].sb->seq) >
		    le64_to_cpu(sb[best_sb].sb->seq))
			best_sb = i;

	mi = bch2_sb_get_members(sb[best_sb].sb);

	i = 0;
	while (i < nr_devices) {
		if (i != best_sb &&
		    !bch2_dev_exists(sb[best_sb].sb, mi, sb[i].sb->dev_idx)) {
			pr_info("%pg has been removed, skipping", sb[i].bdev);
			bch2_free_super(&sb[i]);
			array_remove_item(sb, nr_devices, i);
			continue;
		}

		err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
		if (err)
			goto err_print;
		i++;
	}

	ret = -ENOMEM;
	c = bch2_fs_alloc(sb[best_sb].sb, opts);
	if (!c)
		goto err;

	err = "bch2_dev_online() error";
	down_write(&c->state_lock);
	for (i = 0; i < nr_devices; i++)
		if (bch2_dev_attach_bdev(c, &sb[i])) {
			up_write(&c->state_lock);
			goto err_print;
		}
	up_write(&c->state_lock);

	err = "insufficient devices";
	if (!bch2_fs_may_start(c))
		goto err_print;

	if (!c->opts.nostart) {
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}
out:
	kfree(sb);
	module_put(THIS_MODULE);
out2:
	pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
	return c;
err_print:
	pr_err("bch_fs_open err opening %s: %s",
	       devices[0], err);
	ret = -EINVAL;
err:
	if (c)
		bch2_fs_stop(c);
	for (i = 0; i < nr_devices; i++)
		bch2_free_super(&sb[i]);
	c = ERR_PTR(ret);
	goto out;
}

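/*
 * Open or attach a single device: if a filesystem with this UUID is already
 * open the device is attached to it, otherwise a new filesystem instance is
 * allocated:
 */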
static const char *__bch2_fs_open_incremental(struct bch_sb_handle *sb,
					      struct bch_opts opts)
{
	const char *err;
	struct bch_fs *c;
	bool allocated_fs = false;
	int ret;

	err = bch2_sb_validate(sb);
	if (err)
		return err;

	mutex_lock(&bch_fs_list_lock);
	c = __bch2_uuid_to_fs(sb->sb->uuid);
	if (c) {
		closure_get(&c->cl);

		err = bch2_dev_in_fs(c->disk_sb.sb, sb->sb);
		if (err)
			goto err;
	} else {
		c = bch2_fs_alloc(sb->sb, opts);
		err = "cannot allocate memory";
		if (!c)
			goto err;

		allocated_fs = true;
	}

	err = "bch2_dev_online() error";

	mutex_lock(&c->sb_lock);
	if (bch2_dev_attach_bdev(c, sb)) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}
	mutex_unlock(&c->sb_lock);

	if (!c->opts.nostart && bch2_fs_may_start(c)) {
		err = "error starting filesystem";
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}

	closure_put(&c->cl);
	mutex_unlock(&bch_fs_list_lock);

	return NULL;
err:
	mutex_unlock(&bch_fs_list_lock);

	if (allocated_fs)
		bch2_fs_stop(c);
	else if (c)
		closure_put(&c->cl);

	return err;
}

const char *bch2_fs_open_incremental(const char *path)
{
	struct bch_sb_handle sb;
	struct bch_opts opts = bch2_opts_empty();
	const char *err;

	if (bch2_read_super(path, &opts, &sb))
		return "error reading superblock";

	err = __bch2_fs_open_incremental(&sb, opts);
	bch2_free_super(&sb);

	return err;
}

/* Global interfaces/init */

static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	bch2_btree_key_cache_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}

static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_btree_key_cache_init() ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}

#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);