Commit | Line | Data |
---|---|---|
1c6fdbd8 KO |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * bcachefs setup/teardown code, and some metadata io - read a superblock and | |
4 | * figure out what to do with it. | |
5 | * | |
6 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> | |
7 | * Copyright 2012 Google, Inc. | |
8 | */ | |
9 | ||
10 | #include "bcachefs.h" | |
7b3f84ea KO |
11 | #include "alloc_background.h" |
12 | #include "alloc_foreground.h" | |
1c6fdbd8 KO |
13 | #include "btree_cache.h" |
14 | #include "btree_gc.h" | |
15 | #include "btree_update_interior.h" | |
16 | #include "btree_io.h" | |
17 | #include "chardev.h" | |
18 | #include "checksum.h" | |
19 | #include "clock.h" | |
20 | #include "compress.h" | |
21 | #include "debug.h" | |
22 | #include "disk_groups.h" | |
cd575ddf | 23 | #include "ec.h" |
1c6fdbd8 KO |
24 | #include "error.h" |
25 | #include "fs.h" | |
26 | #include "fs-io.h" | |
27 | #include "fsck.h" | |
28 | #include "inode.h" | |
29 | #include "io.h" | |
30 | #include "journal.h" | |
31 | #include "journal_reclaim.h" | |
32 | #include "move.h" | |
33 | #include "migrate.h" | |
34 | #include "movinggc.h" | |
35 | #include "quota.h" | |
36 | #include "rebalance.h" | |
37 | #include "recovery.h" | |
38 | #include "replicas.h" | |
39 | #include "super.h" | |
40 | #include "super-io.h" | |
41 | #include "sysfs.h" | |
42 | #include "trace.h" | |
43 | ||
44 | #include <linux/backing-dev.h> | |
45 | #include <linux/blkdev.h> | |
46 | #include <linux/debugfs.h> | |
47 | #include <linux/device.h> | |
48 | #include <linux/idr.h> | |
49 | #include <linux/kthread.h> | |
50 | #include <linux/module.h> | |
51 | #include <linux/percpu.h> | |
52 | #include <linux/random.h> | |
53 | #include <linux/sysfs.h> | |
54 | #include <crypto/hash.h> | |
55 | ||
56 | MODULE_LICENSE("GPL"); | |
57 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); | |
58 | ||
59 | #define KTYPE(type) \ | |
60 | static const struct attribute_group type ## _group = { \ | |
61 | .attrs = type ## _files \ | |
62 | }; \ | |
63 | \ | |
64 | static const struct attribute_group *type ## _groups[] = { \ | |
65 | &type ## _group, \ | |
66 | NULL \ | |
67 | }; \ | |
68 | \ | |
69 | static const struct kobj_type type ## _ktype = { \ | |
70 | .release = type ## _release, \ | |
71 | .sysfs_ops = &type ## _sysfs_ops, \ | |
72 | .default_groups = type ## _groups \ | |
73 | } | |
74 | ||
75 | static void bch2_fs_release(struct kobject *); | |
76 | static void bch2_dev_release(struct kobject *); | |
77 | ||
78 | static void bch2_fs_internal_release(struct kobject *k) | |
79 | { | |
80 | } | |
81 | ||
82 | static void bch2_fs_opts_dir_release(struct kobject *k) | |
83 | { | |
84 | } | |
85 | ||
86 | static void bch2_fs_time_stats_release(struct kobject *k) | |
87 | { | |
88 | } | |
89 | ||
90 | KTYPE(bch2_fs); | |
91 | KTYPE(bch2_fs_internal); | |
92 | KTYPE(bch2_fs_opts_dir); | |
93 | KTYPE(bch2_fs_time_stats); | |
94 | KTYPE(bch2_dev); | |
95 | ||
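/*
 * For reference, a sketch of what KTYPE(bch2_fs) above expands to;
 * bch2_fs_files and bch2_fs_sysfs_ops are assumed to be provided by sysfs.h,
 * and bch2_fs_release() is declared just above:
 *
 *	static const struct attribute_group bch2_fs_group = {
 *		.attrs = bch2_fs_files
 *	};
 *
 *	static const struct attribute_group *bch2_fs_groups[] = {
 *		&bch2_fs_group,
 *		NULL
 *	};
 *
 *	static const struct kobj_type bch2_fs_ktype = {
 *		.release	= bch2_fs_release,
 *		.sysfs_ops	= &bch2_fs_sysfs_ops,
 *		.default_groups	= bch2_fs_groups
 *	};
 */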
96 | static struct kset *bcachefs_kset; | |
97 | static LIST_HEAD(bch_fs_list); | |
98 | static DEFINE_MUTEX(bch_fs_list_lock); | |
99 | ||
100 | static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait); | |
101 | ||
102 | static void bch2_dev_free(struct bch_dev *); | |
103 | static int bch2_dev_alloc(struct bch_fs *, unsigned); | |
104 | static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *); | |
105 | static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *); | |
106 | ||
107 | struct bch_fs *bch2_dev_to_fs(dev_t dev) | |
108 | { | |
109 | struct bch_fs *c; | |
110 | struct bch_dev *ca; | |
111 | unsigned i; | |
112 | ||
113 | mutex_lock(&bch_fs_list_lock); | |
114 | rcu_read_lock(); | |
115 | ||
116 | list_for_each_entry(c, &bch_fs_list, list) | |
117 | for_each_member_device_rcu(ca, c, i, NULL) | |
118 | if (ca->disk_sb.bdev->bd_dev == dev) { | |
119 | closure_get(&c->cl); | |
120 | goto found; | |
121 | } | |
122 | c = NULL; | |
123 | found: | |
124 | rcu_read_unlock(); | |
125 | mutex_unlock(&bch_fs_list_lock); | |
126 | ||
127 | return c; | |
128 | } | |
129 | ||
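/*
 * Illustrative sketch of the calling convention: bch2_dev_to_fs() returns
 * with a reference taken via closure_get(), so a caller is expected to drop
 * it with closure_put() when finished. The function below is only an example,
 * not code from this file:
 */
static void example_use_dev_to_fs(dev_t dev)
{
	struct bch_fs *c = bch2_dev_to_fs(dev);

	if (!c)
		return;

	/* ... use c ... */

	closure_put(&c->cl);
}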
130 | static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid) | |
131 | { | |
132 | struct bch_fs *c; | |
133 | ||
134 | lockdep_assert_held(&bch_fs_list_lock); | |
135 | ||
136 | list_for_each_entry(c, &bch_fs_list, list) | |
137 | if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid))) | |
138 | return c; | |
139 | ||
140 | return NULL; | |
141 | } | |
142 | ||
143 | struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid) | |
144 | { | |
145 | struct bch_fs *c; | |
146 | ||
147 | mutex_lock(&bch_fs_list_lock); | |
148 | c = __bch2_uuid_to_fs(uuid); | |
149 | if (c) | |
150 | closure_get(&c->cl); | |
151 | mutex_unlock(&bch_fs_list_lock); | |
152 | ||
153 | return c; | |
154 | } | |
155 | ||
156 | /* Filesystem RO/RW: */ | |
157 | ||
158 | /* | |
159 | * For startup/shutdown of RW stuff, the dependencies are: | |
160 | * | |
161 | * - foreground writes depend on copygc and rebalance (to free up space) | |
162 | * | |
163 | * - copygc and rebalance depend on mark and sweep gc (they actually probably | |
164 | * don't because they either reserve ahead of time or don't block if | |
165 | * allocations fail, but allocations can require mark and sweep gc to run | |
166 | * because of generation number wraparound) | |
167 | * | |
168 | * - all of the above depends on the allocator threads | |
169 | * | |
170 | * - allocator depends on the journal (when it rewrites prios and gens) | |
171 | */ | |
172 | ||
173 | static void __bch2_fs_read_only(struct bch_fs *c) | |
174 | { | |
175 | struct bch_dev *ca; | |
176 | unsigned i; | |
177 | ||
178 | bch2_rebalance_stop(c); | |
179 | ||
180 | for_each_member_device(ca, c, i) | |
181 | bch2_copygc_stop(ca); | |
182 | ||
183 | bch2_gc_thread_stop(c); | |
184 | ||
185 | /* | |
186 | * Flush journal before stopping allocators, because flushing journal | |
187 | * blacklist entries involves allocating new btree nodes: | |
188 | */ | |
189 | bch2_journal_flush_all_pins(&c->journal); | |
190 | ||
191 | for_each_member_device(ca, c, i) | |
192 | bch2_dev_allocator_stop(ca); | |
193 | ||
194 | bch2_journal_flush_all_pins(&c->journal); | |
195 | ||
196 | /* | |
197 | * We need to explicitly wait on btree interior updates to complete | |
198 | * before stopping the journal, flushing all journal pins isn't | |
199 | * sufficient, because in the BTREE_INTERIOR_UPDATING_ROOT case btree | |
200 | * interior updates have to drop their journal pin before they're | |
201 | * fully complete: | |
202 | */ | |
203 | closure_wait_event(&c->btree_interior_update_wait, | |
204 | !bch2_btree_interior_updates_nr_pending(c)); | |
205 | ||
206 | bch2_fs_journal_stop(&c->journal); | |
207 | ||
208 | /* | |
209 | * the journal kicks off btree writes via reclaim - wait for in flight | |
210 | * writes after stopping journal: | |
211 | */ | |
212 | if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) | |
213 | bch2_btree_flush_all_writes(c); | |
214 | else | |
215 | bch2_btree_verify_flushed(c); | |
216 | ||
217 | /* | |
218 | * After stopping journal: | |
219 | */ | |
220 | for_each_member_device(ca, c, i) | |
221 | bch2_dev_allocator_remove(c, ca); | |
222 | } | |
223 | ||
224 | static void bch2_writes_disabled(struct percpu_ref *writes) | |
225 | { | |
226 | struct bch_fs *c = container_of(writes, struct bch_fs, writes); | |
227 | ||
228 | set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); | |
229 | wake_up(&bch_read_only_wait); | |
230 | } | |
231 | ||
232 | void bch2_fs_read_only(struct bch_fs *c) | |
233 | { | |
234 | if (c->state == BCH_FS_RO) | |
235 | return; | |
236 | ||
237 | BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); | |
238 | ||
239 | /* | |
240 | * Block new foreground-end write operations from starting - any new | |
241 | * writes will return -EROFS: | |
242 | * | |
243 | * (This is really blocking new _allocations_, writes to previously | |
244 | * allocated space can still happen until stopping the allocator in | |
245 | * bch2_dev_allocator_stop()). | |
246 | */ | |
247 | percpu_ref_kill(&c->writes); | |
248 | ||
249 | cancel_delayed_work(&c->pd_controllers_update); | |
250 | ||
251 | /* | |
252 | * If we're not doing an emergency shutdown, we want to wait on | |
253 | * outstanding writes to complete so they don't see spurious errors due | |
254 | * to shutting down the allocator: | |
255 | * | |
256 | * If we are doing an emergency shutdown outstanding writes may | |
257 | * hang until we shutdown the allocator so we don't want to wait | |
258 | * on outstanding writes before shutting everything down - but | |
259 | * we do need to wait on them before returning and signalling | |
260 | * that going RO is complete: | |
261 | */ | |
262 | wait_event(bch_read_only_wait, | |
263 | test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) || | |
264 | test_bit(BCH_FS_EMERGENCY_RO, &c->flags)); | |
265 | ||
266 | __bch2_fs_read_only(c); | |
267 | ||
268 | wait_event(bch_read_only_wait, | |
269 | test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); | |
270 | ||
271 | clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); | |
272 | ||
273 | if (!bch2_journal_error(&c->journal) && | |
274 | !test_bit(BCH_FS_ERROR, &c->flags) && | |
275 | !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) | |
276 | bch2_fs_mark_clean(c, true); | |
277 | ||
278 | if (c->state != BCH_FS_STOPPING) | |
279 | c->state = BCH_FS_RO; | |
280 | } | |
281 | ||
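/*
 * Sketch of the pattern foreground write paths are expected to follow against
 * c->writes (illustrative only - the real write paths live elsewhere, e.g.
 * io.c): once bch2_fs_read_only() has killed the ref, the tryget fails and
 * the operation returns -EROFS.
 */
static int example_write_op(struct bch_fs *c)
{
	if (!percpu_ref_tryget(&c->writes))
		return -EROFS;

	/* ... perform the write ... */

	percpu_ref_put(&c->writes);
	return 0;
}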
282 | static void bch2_fs_read_only_work(struct work_struct *work) | |
283 | { | |
284 | struct bch_fs *c = | |
285 | container_of(work, struct bch_fs, read_only_work); | |
286 | ||
287 | mutex_lock(&c->state_lock); | |
288 | bch2_fs_read_only(c); | |
289 | mutex_unlock(&c->state_lock); | |
290 | } | |
291 | ||
292 | static void bch2_fs_read_only_async(struct bch_fs *c) | |
293 | { | |
294 | queue_work(system_long_wq, &c->read_only_work); | |
295 | } | |
296 | ||
297 | bool bch2_fs_emergency_read_only(struct bch_fs *c) | |
298 | { | |
299 | bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags); | |
300 | ||
301 | bch2_fs_read_only_async(c); | |
302 | bch2_journal_halt(&c->journal); | |
303 | ||
304 | wake_up(&bch_read_only_wait); | |
305 | return ret; | |
306 | } | |
307 | ||
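/*
 * Sketch of how an error path might use the return value: it is true only for
 * the caller that actually flipped BCH_FS_EMERGENCY_RO, so the message below
 * is printed once (illustrative only):
 */
static void example_fatal_error(struct bch_fs *c)
{
	if (bch2_fs_emergency_read_only(c))
		bch_err(c, "fatal error - emergency read only");
}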
308 | const char *bch2_fs_read_write(struct bch_fs *c) | |
309 | { | |
310 | struct bch_dev *ca; | |
311 | const char *err = NULL; | |
312 | unsigned i; | |
313 | ||
314 | if (c->state == BCH_FS_RW) | |
315 | return NULL; | |
316 | ||
317 | bch2_fs_mark_clean(c, false); | |
318 | ||
319 | for_each_rw_member(ca, c, i) | |
320 | bch2_dev_allocator_add(c, ca); | |
321 | bch2_recalc_capacity(c); | |
322 | ||
323 | err = "error starting allocator thread"; | |
324 | for_each_rw_member(ca, c, i) | |
325 | if (bch2_dev_allocator_start(ca)) { | |
326 | percpu_ref_put(&ca->io_ref); | |
327 | goto err; | |
328 | } | |
329 | ||
330 | err = "error starting btree GC thread"; | |
331 | if (bch2_gc_thread_start(c)) | |
332 | goto err; | |
333 | ||
334 | err = "error starting copygc thread"; | |
335 | for_each_rw_member(ca, c, i) | |
336 | if (bch2_copygc_start(c, ca)) { | |
337 | percpu_ref_put(&ca->io_ref); | |
338 | goto err; | |
339 | } | |
340 | ||
341 | err = "error starting rebalance thread"; | |
342 | if (bch2_rebalance_start(c)) | |
343 | goto err; | |
344 | ||
345 | schedule_delayed_work(&c->pd_controllers_update, 5 * HZ); | |
346 | ||
347 | if (c->state != BCH_FS_STARTING) | |
348 | percpu_ref_reinit(&c->writes); | |
349 | ||
350 | c->state = BCH_FS_RW; | |
351 | return NULL; | |
352 | err: | |
353 | __bch2_fs_read_only(c); | |
354 | return err; | |
355 | } | |
356 | ||
357 | /* Filesystem startup/shutdown: */ | |
358 | ||
359 | static void bch2_fs_free(struct bch_fs *c) | |
360 | { | |
361 | unsigned i; | |
362 | ||
363 | for (i = 0; i < BCH_TIME_STAT_NR; i++) | |
364 | bch2_time_stats_exit(&c->times[i]); | |
365 | ||
366 | bch2_fs_quota_exit(c); | |
367 | bch2_fs_fsio_exit(c); | |
cd575ddf | 368 | bch2_fs_ec_exit(c); |
1c6fdbd8 KO |
369 | bch2_fs_encryption_exit(c); |
370 | bch2_fs_io_exit(c); | |
371 | bch2_fs_btree_cache_exit(c); | |
372 | bch2_fs_journal_exit(&c->journal); | |
373 | bch2_io_clock_exit(&c->io_clock[WRITE]); | |
374 | bch2_io_clock_exit(&c->io_clock[READ]); | |
375 | bch2_fs_compress_exit(c); | |
376 | percpu_free_rwsem(&c->usage_lock); | |
9ca53b55 | 377 | free_percpu(c->usage[0]); |
581edb63 | 378 | mempool_exit(&c->btree_iters_pool); |
1c6fdbd8 KO |
379 | mempool_exit(&c->btree_bounce_pool); |
380 | bioset_exit(&c->btree_bio); | |
381 | mempool_exit(&c->btree_interior_update_pool); | |
382 | mempool_exit(&c->btree_reserve_pool); | |
383 | mempool_exit(&c->fill_iter); | |
384 | percpu_ref_exit(&c->writes); | |
385 | kfree(rcu_dereference_protected(c->replicas, 1)); | |
386 | kfree(rcu_dereference_protected(c->disk_groups, 1)); | |
387 | ||
388 | if (c->copygc_wq) | |
389 | destroy_workqueue(c->copygc_wq); | |
390 | if (c->wq) | |
391 | destroy_workqueue(c->wq); | |
392 | ||
393 | free_pages((unsigned long) c->disk_sb.sb, | |
394 | c->disk_sb.page_order); | |
395 | kvpfree(c, sizeof(*c)); | |
396 | module_put(THIS_MODULE); | |
397 | } | |
398 | ||
399 | static void bch2_fs_release(struct kobject *kobj) | |
400 | { | |
401 | struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); | |
402 | ||
403 | bch2_fs_free(c); | |
404 | } | |
405 | ||
406 | void bch2_fs_stop(struct bch_fs *c) | |
407 | { | |
408 | struct bch_dev *ca; | |
409 | unsigned i; | |
410 | ||
af1c6871 KO |
411 | bch_verbose(c, "shutting down"); |
412 | ||
1c6fdbd8 KO |
413 | for_each_member_device(ca, c, i) |
414 | if (ca->kobj.state_in_sysfs && | |
415 | ca->disk_sb.bdev) | |
416 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
417 | ||
418 | if (c->kobj.state_in_sysfs) | |
419 | kobject_del(&c->kobj); | |
420 | ||
421 | bch2_fs_debug_exit(c); | |
422 | bch2_fs_chardev_exit(c); | |
423 | ||
424 | kobject_put(&c->time_stats); | |
425 | kobject_put(&c->opts_dir); | |
426 | kobject_put(&c->internal); | |
427 | ||
428 | mutex_lock(&bch_fs_list_lock); | |
429 | list_del(&c->list); | |
430 | mutex_unlock(&bch_fs_list_lock); | |
431 | ||
432 | closure_sync(&c->cl); | |
433 | closure_debug_destroy(&c->cl); | |
434 | ||
435 | mutex_lock(&c->state_lock); | |
436 | bch2_fs_read_only(c); | |
437 | mutex_unlock(&c->state_lock); | |
438 | ||
439 | /* btree prefetch might have kicked off reads in the background: */ | |
440 | bch2_btree_flush_all_reads(c); | |
441 | ||
442 | for_each_member_device(ca, c, i) | |
443 | cancel_work_sync(&ca->io_error_work); | |
444 | ||
445 | cancel_work_sync(&c->btree_write_error_work); | |
446 | cancel_delayed_work_sync(&c->pd_controllers_update); | |
447 | cancel_work_sync(&c->read_only_work); | |
448 | ||
449 | for (i = 0; i < c->sb.nr_devices; i++) | |
450 | if (c->devs[i]) | |
451 | bch2_dev_free(rcu_dereference_protected(c->devs[i], 1)); | |
452 | ||
af1c6871 KO |
453 | bch_verbose(c, "shutdown complete"); |
454 | ||
1c6fdbd8 KO |
455 | kobject_put(&c->kobj); |
456 | } | |
457 | ||
458 | static const char *bch2_fs_online(struct bch_fs *c) | |
459 | { | |
460 | struct bch_dev *ca; | |
461 | const char *err = NULL; | |
462 | unsigned i; | |
463 | int ret; | |
464 | ||
465 | lockdep_assert_held(&bch_fs_list_lock); | |
466 | ||
467 | if (!list_empty(&c->list)) | |
468 | return NULL; | |
469 | ||
470 | if (__bch2_uuid_to_fs(c->sb.uuid)) | |
471 | return "filesystem UUID already open"; | |
472 | ||
473 | ret = bch2_fs_chardev_init(c); | |
474 | if (ret) | |
475 | return "error creating character device"; | |
476 | ||
477 | bch2_fs_debug_init(c); | |
478 | ||
479 | if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) || | |
480 | kobject_add(&c->internal, &c->kobj, "internal") || | |
481 | kobject_add(&c->opts_dir, &c->kobj, "options") || | |
482 | kobject_add(&c->time_stats, &c->kobj, "time_stats") || | |
483 | bch2_opts_create_sysfs_files(&c->opts_dir)) | |
484 | return "error creating sysfs objects"; | |
485 | ||
486 | mutex_lock(&c->state_lock); | |
487 | ||
488 | err = "error creating sysfs objects"; | |
489 | __for_each_member_device(ca, c, i, NULL) | |
490 | if (bch2_dev_sysfs_online(c, ca)) | |
491 | goto err; | |
492 | ||
493 | list_add(&c->list, &bch_fs_list); | |
494 | err = NULL; | |
495 | err: | |
496 | mutex_unlock(&c->state_lock); | |
497 | return err; | |
498 | } | |
499 | ||
500 | static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) | |
501 | { | |
502 | struct bch_sb_field_members *mi; | |
503 | struct bch_fs *c; | |
504 | unsigned i, iter_size; | |
505 | const char *err; | |
506 | ||
507 | pr_verbose_init(opts, ""); | |
508 | ||
509 | c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO); | |
510 | if (!c) | |
511 | goto out; | |
512 | ||
513 | __module_get(THIS_MODULE); | |
514 | ||
515 | c->minor = -1; | |
516 | c->disk_sb.fs_sb = true; | |
517 | ||
518 | mutex_init(&c->state_lock); | |
519 | mutex_init(&c->sb_lock); | |
520 | mutex_init(&c->replicas_gc_lock); | |
521 | mutex_init(&c->btree_root_lock); | |
522 | INIT_WORK(&c->read_only_work, bch2_fs_read_only_work); | |
523 | ||
524 | init_rwsem(&c->gc_lock); | |
525 | ||
526 | for (i = 0; i < BCH_TIME_STAT_NR; i++) | |
527 | bch2_time_stats_init(&c->times[i]); | |
528 | ||
b092dadd KO |
529 | bch2_fs_allocator_background_init(c); |
530 | bch2_fs_allocator_foreground_init(c); | |
1c6fdbd8 KO |
531 | bch2_fs_rebalance_init(c); |
532 | bch2_fs_quota_init(c); | |
533 | ||
534 | INIT_LIST_HEAD(&c->list); | |
535 | ||
536 | INIT_LIST_HEAD(&c->btree_interior_update_list); | |
537 | mutex_init(&c->btree_reserve_cache_lock); | |
538 | mutex_init(&c->btree_interior_update_lock); | |
539 | ||
540 | mutex_init(&c->bio_bounce_pages_lock); | |
541 | ||
542 | bio_list_init(&c->btree_write_error_list); | |
543 | spin_lock_init(&c->btree_write_error_lock); | |
544 | INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work); | |
545 | ||
546 | INIT_LIST_HEAD(&c->fsck_errors); | |
547 | mutex_init(&c->fsck_error_lock); | |
548 | ||
cd575ddf KO |
549 | INIT_LIST_HEAD(&c->ec_new_stripe_list); |
550 | mutex_init(&c->ec_new_stripe_lock); | |
dfe9bfb3 | 551 | mutex_init(&c->ec_stripe_create_lock); |
cd575ddf KO |
552 | spin_lock_init(&c->ec_stripes_heap_lock); |
553 | ||
1c6fdbd8 KO |
554 | seqcount_init(&c->gc_pos_lock); |
555 | ||
556 | c->copy_gc_enabled = 1; | |
557 | c->rebalance.enabled = 1; | |
558 | c->promote_whole_extents = true; | |
559 | ||
560 | c->journal.write_time = &c->times[BCH_TIME_journal_write]; | |
561 | c->journal.delay_time = &c->times[BCH_TIME_journal_delay]; | |
562 | c->journal.blocked_time = &c->times[BCH_TIME_journal_blocked]; | |
563 | c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq]; | |
564 | ||
565 | bch2_fs_btree_cache_init_early(&c->btree_cache); | |
566 | ||
567 | mutex_lock(&c->sb_lock); | |
568 | ||
569 | if (bch2_sb_to_fs(c, sb)) { | |
570 | mutex_unlock(&c->sb_lock); | |
571 | goto err; | |
572 | } | |
573 | ||
574 | mutex_unlock(&c->sb_lock); | |
575 | ||
576 | scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid); | |
577 | ||
578 | c->opts = bch2_opts_default; | |
579 | bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb)); | |
580 | bch2_opts_apply(&c->opts, opts); | |
581 | ||
582 | c->block_bits = ilog2(c->opts.block_size); | |
583 | c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c); | |
584 | ||
585 | c->opts.nochanges |= c->opts.noreplay; | |
586 | c->opts.read_only |= c->opts.nochanges; | |
587 | ||
588 | if (bch2_fs_init_fault("fs_alloc")) | |
589 | goto err; | |
590 | ||
591 | iter_size = sizeof(struct btree_node_iter_large) + | |
592 | (btree_blocks(c) + 1) * 2 * | |
593 | sizeof(struct btree_node_iter_set); | |
594 | ||
595 | if (!(c->wq = alloc_workqueue("bcachefs", | |
596 | WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || | |
597 | !(c->copygc_wq = alloc_workqueue("bcache_copygc", | |
598 | WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || | |
599 | percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) || | |
600 | mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1, | |
601 | sizeof(struct btree_reserve)) || | |
602 | mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, | |
603 | sizeof(struct btree_update)) || | |
604 | mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || | |
605 | bioset_init(&c->btree_bio, 1, | |
606 | max(offsetof(struct btree_read_bio, bio), | |
607 | offsetof(struct btree_write_bio, wbio.bio)), | |
608 | BIOSET_NEED_BVECS) || | |
9ca53b55 | 609 | !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) || |
1c6fdbd8 KO |
610 | percpu_init_rwsem(&c->usage_lock) || |
611 | mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1, | |
612 | btree_bytes(c)) || | |
581edb63 KO |
613 | mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, |
614 | sizeof(struct btree_iter) * BTREE_ITER_MAX) || | |
1c6fdbd8 KO |
615 | bch2_io_clock_init(&c->io_clock[READ]) || |
616 | bch2_io_clock_init(&c->io_clock[WRITE]) || | |
617 | bch2_fs_journal_init(&c->journal) || | |
618 | bch2_fs_btree_cache_init(c) || | |
619 | bch2_fs_io_init(c) || | |
620 | bch2_fs_encryption_init(c) || | |
621 | bch2_fs_compress_init(c) || | |
cd575ddf | 622 | bch2_fs_ec_init(c) || |
1c6fdbd8 KO |
623 | bch2_fs_fsio_init(c)) |
624 | goto err; | |
625 | ||
626 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
627 | for (i = 0; i < c->sb.nr_devices; i++) | |
628 | if (bch2_dev_exists(c->disk_sb.sb, mi, i) && | |
629 | bch2_dev_alloc(c, i)) | |
630 | goto err; | |
631 | ||
632 | /* | |
633 | * Now that all allocations have succeeded, init various refcounty | |
634 | * things that let us shutdown: | |
635 | */ | |
636 | closure_init(&c->cl, NULL); | |
637 | ||
638 | c->kobj.kset = bcachefs_kset; | |
639 | kobject_init(&c->kobj, &bch2_fs_ktype); | |
640 | kobject_init(&c->internal, &bch2_fs_internal_ktype); | |
641 | kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype); | |
642 | kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype); | |
643 | ||
644 | mutex_lock(&bch_fs_list_lock); | |
645 | err = bch2_fs_online(c); | |
646 | mutex_unlock(&bch_fs_list_lock); | |
647 | if (err) { | |
648 | bch_err(c, "bch2_fs_online() error: %s", err); | |
649 | goto err; | |
650 | } | |
651 | out: | |
652 | pr_verbose_init(opts, "ret %i", c ? 0 : -ENOMEM); | |
653 | return c; | |
654 | err: | |
655 | bch2_fs_free(c); | |
656 | c = NULL; | |
657 | goto out; | |
658 | } | |
659 | ||
660 | const char *bch2_fs_start(struct bch_fs *c) | |
661 | { | |
662 | const char *err = "cannot allocate memory"; | |
663 | struct bch_sb_field_members *mi; | |
664 | struct bch_dev *ca; | |
a420eea6 | 665 | time64_t now = ktime_get_real_seconds(); |
1c6fdbd8 KO |
666 | unsigned i; |
667 | int ret = -EINVAL; | |
668 | ||
669 | mutex_lock(&c->state_lock); | |
670 | ||
671 | BUG_ON(c->state != BCH_FS_STARTING); | |
672 | ||
673 | mutex_lock(&c->sb_lock); | |
674 | ||
675 | for_each_online_member(ca, c, i) | |
676 | bch2_sb_from_fs(c, ca); | |
677 | ||
678 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
679 | for_each_online_member(ca, c, i) | |
680 | mi->members[ca->dev_idx].last_mount = cpu_to_le64(now); | |
681 | ||
682 | mutex_unlock(&c->sb_lock); | |
683 | ||
684 | for_each_rw_member(ca, c, i) | |
685 | bch2_dev_allocator_add(c, ca); | |
686 | bch2_recalc_capacity(c); | |
687 | ||
688 | ret = BCH_SB_INITIALIZED(c->disk_sb.sb) | |
689 | ? bch2_fs_recovery(c) | |
690 | : bch2_fs_initialize(c); | |
691 | if (ret) | |
692 | goto err; | |
cd575ddf KO |
693 | |
694 | ret = bch2_opts_check_may_set(c); | |
695 | if (ret) | |
696 | goto err; | |
1c6fdbd8 KO |
697 | |
698 | err = "dynamic fault"; | |
699 | if (bch2_fs_init_fault("fs_start")) | |
700 | goto err; | |
701 | ||
702 | if (c->opts.read_only) { | |
703 | bch2_fs_read_only(c); | |
704 | } else { | |
705 | err = bch2_fs_read_write(c); | |
706 | if (err) | |
707 | goto err; | |
708 | } | |
709 | ||
710 | set_bit(BCH_FS_STARTED, &c->flags); | |
711 | ||
712 | err = NULL; | |
713 | out: | |
714 | mutex_unlock(&c->state_lock); | |
715 | return err; | |
716 | err: | |
717 | switch (ret) { | |
718 | case BCH_FSCK_ERRORS_NOT_FIXED: | |
719 | bch_err(c, "filesystem contains errors: please report this to the developers"); | |
720 | pr_cont("mount with -o fix_errors to repair\n"); | |
721 | err = "fsck error"; | |
722 | break; | |
723 | case BCH_FSCK_REPAIR_UNIMPLEMENTED: | |
724 | bch_err(c, "filesystem contains errors: please report this to the developers"); | |
725 | pr_cont("repair unimplemented: inform the developers so that it can be added\n"); | |
726 | err = "fsck error"; | |
727 | break; | |
728 | case BCH_FSCK_REPAIR_IMPOSSIBLE: | |
729 | bch_err(c, "filesystem contains errors, but repair impossible"); | |
730 | err = "fsck error"; | |
731 | break; | |
732 | case BCH_FSCK_UNKNOWN_VERSION: | |
733 | err = "unknown metadata version";; | |
734 | break; | |
735 | case -ENOMEM: | |
736 | err = "cannot allocate memory"; | |
737 | break; | |
738 | case -EIO: | |
739 | err = "IO error"; | |
740 | break; | |
741 | } | |
742 | ||
743 | BUG_ON(!err); | |
744 | set_bit(BCH_FS_ERROR, &c->flags); | |
745 | goto out; | |
746 | } | |
747 | ||
748 | static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c) | |
749 | { | |
750 | struct bch_sb_field_members *sb_mi; | |
751 | ||
752 | sb_mi = bch2_sb_get_members(sb); | |
753 | if (!sb_mi) | |
754 | return "Invalid superblock: member info area missing"; | |
755 | ||
756 | if (le16_to_cpu(sb->block_size) != c->opts.block_size) | |
757 | return "mismatched block size"; | |
758 | ||
759 | if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) < | |
760 | BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb)) | |
761 | return "new cache bucket size is too small"; | |
762 | ||
763 | return NULL; | |
764 | } | |
765 | ||
766 | static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) | |
767 | { | |
768 | struct bch_sb *newest = | |
769 | le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb; | |
770 | struct bch_sb_field_members *mi = bch2_sb_get_members(newest); | |
771 | ||
772 | if (!uuid_equal(&fs->uuid, &sb->uuid)) | |
773 | return "device not a member of filesystem"; | |
774 | ||
775 | if (!bch2_dev_exists(newest, mi, sb->dev_idx)) | |
776 | return "device has been removed"; | |
777 | ||
778 | if (fs->block_size != sb->block_size) | |
779 | return "mismatched block size"; | |
780 | ||
781 | return NULL; | |
782 | } | |
783 | ||
784 | /* Device startup/shutdown: */ | |
785 | ||
786 | static void bch2_dev_release(struct kobject *kobj) | |
787 | { | |
788 | struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); | |
789 | ||
790 | kfree(ca); | |
791 | } | |
792 | ||
793 | static void bch2_dev_free(struct bch_dev *ca) | |
794 | { | |
795 | cancel_work_sync(&ca->io_error_work); | |
796 | ||
797 | if (ca->kobj.state_in_sysfs && | |
798 | ca->disk_sb.bdev) | |
799 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
800 | ||
801 | if (ca->kobj.state_in_sysfs) | |
802 | kobject_del(&ca->kobj); | |
803 | ||
804 | bch2_free_super(&ca->disk_sb); | |
805 | bch2_dev_journal_exit(ca); | |
806 | ||
807 | free_percpu(ca->io_done); | |
808 | bioset_exit(&ca->replica_set); | |
809 | bch2_dev_buckets_free(ca); | |
810 | ||
811 | bch2_time_stats_exit(&ca->io_latency[WRITE]); | |
812 | bch2_time_stats_exit(&ca->io_latency[READ]); | |
813 | ||
814 | percpu_ref_exit(&ca->io_ref); | |
815 | percpu_ref_exit(&ca->ref); | |
816 | kobject_put(&ca->kobj); | |
817 | } | |
818 | ||
819 | static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca) | |
820 | { | |
821 | ||
822 | lockdep_assert_held(&c->state_lock); | |
823 | ||
824 | if (percpu_ref_is_zero(&ca->io_ref)) | |
825 | return; | |
826 | ||
827 | __bch2_dev_read_only(c, ca); | |
828 | ||
829 | reinit_completion(&ca->io_ref_completion); | |
830 | percpu_ref_kill(&ca->io_ref); | |
831 | wait_for_completion(&ca->io_ref_completion); | |
832 | ||
833 | if (ca->kobj.state_in_sysfs) { | |
834 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
835 | sysfs_remove_link(&ca->kobj, "block"); | |
836 | } | |
837 | ||
838 | bch2_free_super(&ca->disk_sb); | |
839 | bch2_dev_journal_exit(ca); | |
840 | } | |
841 | ||
842 | static void bch2_dev_ref_complete(struct percpu_ref *ref) | |
843 | { | |
844 | struct bch_dev *ca = container_of(ref, struct bch_dev, ref); | |
845 | ||
846 | complete(&ca->ref_completion); | |
847 | } | |
848 | ||
849 | static void bch2_dev_io_ref_complete(struct percpu_ref *ref) | |
850 | { | |
851 | struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref); | |
852 | ||
853 | complete(&ca->io_ref_completion); | |
854 | } | |
855 | ||
856 | static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca) | |
857 | { | |
858 | int ret; | |
859 | ||
860 | if (!c->kobj.state_in_sysfs) | |
861 | return 0; | |
862 | ||
863 | if (!ca->kobj.state_in_sysfs) { | |
864 | ret = kobject_add(&ca->kobj, &c->kobj, | |
865 | "dev-%u", ca->dev_idx); | |
866 | if (ret) | |
867 | return ret; | |
868 | } | |
869 | ||
870 | if (ca->disk_sb.bdev) { | |
871 | struct kobject *block = bdev_kobj(ca->disk_sb.bdev); | |
872 | ||
873 | ret = sysfs_create_link(block, &ca->kobj, "bcachefs"); | |
874 | if (ret) | |
875 | return ret; | |
876 | ||
877 | ret = sysfs_create_link(&ca->kobj, block, "block"); | |
878 | if (ret) | |
879 | return ret; | |
880 | } | |
881 | ||
882 | return 0; | |
883 | } | |
884 | ||
885 | static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, | |
886 | struct bch_member *member) | |
887 | { | |
888 | struct bch_dev *ca; | |
889 | ||
890 | ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
891 | if (!ca) | |
892 | return NULL; | |
893 | ||
894 | kobject_init(&ca->kobj, &bch2_dev_ktype); | |
895 | init_completion(&ca->ref_completion); | |
896 | init_completion(&ca->io_ref_completion); | |
897 | ||
898 | init_rwsem(&ca->bucket_lock); | |
899 | ||
900 | writepoint_init(&ca->copygc_write_point, BCH_DATA_USER); | |
901 | ||
902 | spin_lock_init(&ca->freelist_lock); | |
903 | bch2_dev_copygc_init(ca); | |
904 | ||
905 | INIT_WORK(&ca->io_error_work, bch2_io_error_work); | |
906 | ||
907 | bch2_time_stats_init(&ca->io_latency[READ]); | |
908 | bch2_time_stats_init(&ca->io_latency[WRITE]); | |
909 | ||
910 | ca->mi = bch2_mi_to_cpu(member); | |
911 | ca->uuid = member->uuid; | |
912 | ||
913 | if (opt_defined(c->opts, discard)) | |
914 | ca->mi.discard = opt_get(c->opts, discard); | |
915 | ||
916 | if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete, | |
917 | 0, GFP_KERNEL) || | |
918 | percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete, | |
919 | PERCPU_REF_INIT_DEAD, GFP_KERNEL) || | |
920 | bch2_dev_buckets_alloc(c, ca) || | |
921 | bioset_init(&ca->replica_set, 4, | |
922 | offsetof(struct bch_write_bio, bio), 0) || | |
923 | !(ca->io_done = alloc_percpu(*ca->io_done))) | |
924 | goto err; | |
925 | ||
926 | return ca; | |
927 | err: | |
928 | bch2_dev_free(ca); | |
929 | return NULL; | |
930 | } | |
931 | ||
932 | static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca, | |
933 | unsigned dev_idx) | |
934 | { | |
935 | ca->dev_idx = dev_idx; | |
936 | __set_bit(ca->dev_idx, ca->self.d); | |
937 | scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx); | |
938 | ||
939 | ca->fs = c; | |
940 | rcu_assign_pointer(c->devs[ca->dev_idx], ca); | |
941 | ||
942 | if (bch2_dev_sysfs_online(c, ca)) | |
943 | pr_warn("error creating sysfs objects"); | |
944 | } | |
945 | ||
946 | static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) | |
947 | { | |
948 | struct bch_member *member = | |
949 | bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx; | |
950 | struct bch_dev *ca = NULL; | |
951 | int ret = 0; | |
952 | ||
953 | pr_verbose_init(c->opts, ""); | |
954 | ||
955 | if (bch2_fs_init_fault("dev_alloc")) | |
956 | goto err; | |
957 | ||
958 | ca = __bch2_dev_alloc(c, member); | |
959 | if (!ca) | |
960 | goto err; | |
961 | ||
962 | bch2_dev_attach(c, ca, dev_idx); | |
963 | out: | |
964 | pr_verbose_init(c->opts, "ret %i", ret); | |
965 | return ret; | |
966 | err: | |
967 | if (ca) | |
968 | bch2_dev_free(ca); | |
969 | ret = -ENOMEM; | |
970 | goto out; | |
971 | } | |
972 | ||
973 | static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) | |
974 | { | |
975 | unsigned ret; | |
976 | ||
977 | if (bch2_dev_is_online(ca)) { | |
978 | bch_err(ca, "already have device online in slot %u", | |
979 | sb->sb->dev_idx); | |
980 | return -EINVAL; | |
981 | } | |
982 | ||
983 | if (get_capacity(sb->bdev->bd_disk) < | |
984 | ca->mi.bucket_size * ca->mi.nbuckets) { | |
985 | bch_err(ca, "cannot online: device too small"); | |
986 | return -EINVAL; | |
987 | } | |
988 | ||
989 | BUG_ON(!percpu_ref_is_zero(&ca->io_ref)); | |
990 | ||
991 | if (get_capacity(sb->bdev->bd_disk) < | |
992 | ca->mi.bucket_size * ca->mi.nbuckets) { | |
993 | bch_err(ca, "device too small"); | |
994 | return -EINVAL; | |
995 | } | |
996 | ||
997 | ret = bch2_dev_journal_init(ca, sb->sb); | |
998 | if (ret) | |
999 | return ret; | |
1000 | ||
1001 | /* Commit: */ | |
1002 | ca->disk_sb = *sb; | |
1003 | memset(sb, 0, sizeof(*sb)); | |
1004 | ||
1c6fdbd8 KO |
1005 | percpu_ref_reinit(&ca->io_ref); |
1006 | ||
1007 | return 0; | |
1008 | } | |
1009 | ||
1010 | static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb) | |
1011 | { | |
1012 | struct bch_dev *ca; | |
1013 | int ret; | |
1014 | ||
1015 | lockdep_assert_held(&c->state_lock); | |
1016 | ||
1017 | if (le64_to_cpu(sb->sb->seq) > | |
1018 | le64_to_cpu(c->disk_sb.sb->seq)) | |
1019 | bch2_sb_to_fs(c, sb->sb); | |
1020 | ||
1021 | BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices || | |
1022 | !c->devs[sb->sb->dev_idx]); | |
1023 | ||
1024 | ca = bch_dev_locked(c, sb->sb->dev_idx); | |
1025 | ||
1026 | ret = __bch2_dev_attach_bdev(ca, sb); | |
1027 | if (ret) | |
1028 | return ret; | |
1029 | ||
6eac2c2e | 1030 | mutex_lock(&c->sb_lock); |
9ca53b55 | 1031 | bch2_mark_dev_superblock(ca->fs, ca, 0); |
6eac2c2e KO |
1032 | mutex_unlock(&c->sb_lock); |
1033 | ||
1c6fdbd8 KO |
1034 | bch2_dev_sysfs_online(c, ca); |
1035 | ||
1036 | if (c->sb.nr_devices == 1) | |
1037 | snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev); | |
1038 | snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev); | |
1039 | ||
1040 | rebalance_wakeup(c); | |
1041 | return 0; | |
1042 | } | |
1043 | ||
1044 | /* Device management: */ | |
1045 | ||
1046 | /* | |
1047 | * Note: this function is also used by the error paths - when a particular | |
1048 | * device sees an error, we call it to determine whether we can just set the | |
1049 | * device RO, or - if this function returns false - we'll set the whole | |
1050 | * filesystem RO: | |
1051 | * | |
1052 | * XXX: maybe we should be more explicit about whether we're changing state | |
1053 | * because we got an error or what have you? | |
1054 | */ | |
1055 | bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, | |
1056 | enum bch_member_state new_state, int flags) | |
1057 | { | |
1058 | struct bch_devs_mask new_online_devs; | |
1059 | struct replicas_status s; | |
1060 | struct bch_dev *ca2; | |
1061 | int i, nr_rw = 0, required; | |
1062 | ||
1063 | lockdep_assert_held(&c->state_lock); | |
1064 | ||
1065 | switch (new_state) { | |
1066 | case BCH_MEMBER_STATE_RW: | |
1067 | return true; | |
1068 | case BCH_MEMBER_STATE_RO: | |
1069 | if (ca->mi.state != BCH_MEMBER_STATE_RW) | |
1070 | return true; | |
1071 | ||
1072 | /* do we have enough devices to write to? */ | |
1073 | for_each_member_device(ca2, c, i) | |
1074 | if (ca2 != ca) | |
1075 | nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW; | |
1076 | ||
1077 | required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED) | |
1078 | ? c->opts.metadata_replicas | |
1079 | : c->opts.metadata_replicas_required, | |
1080 | !(flags & BCH_FORCE_IF_DATA_DEGRADED) | |
1081 | ? c->opts.data_replicas | |
1082 | : c->opts.data_replicas_required); | |
1083 | ||
1084 | return nr_rw >= required; | |
1085 | case BCH_MEMBER_STATE_FAILED: | |
1086 | case BCH_MEMBER_STATE_SPARE: | |
1087 | if (ca->mi.state != BCH_MEMBER_STATE_RW && | |
1088 | ca->mi.state != BCH_MEMBER_STATE_RO) | |
1089 | return true; | |
1090 | ||
1091 | /* do we have enough devices to read from? */ | |
1092 | new_online_devs = bch2_online_devs(c); | |
1093 | __clear_bit(ca->dev_idx, new_online_devs.d); | |
1094 | ||
1095 | s = __bch2_replicas_status(c, new_online_devs); | |
1096 | ||
1097 | return bch2_have_enough_devs(s, flags); | |
1098 | default: | |
1099 | BUG(); | |
1100 | } | |
1101 | } | |
1102 | ||
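/*
 * Worked example for the RO case above (assumed option values, for
 * illustration): with metadata_replicas = data_replicas = 1 and no
 * BCH_FORCE_IF_*_DEGRADED flags, required = max(1, 1) = 1, so a device may go
 * RO only while at least one other RW member remains. Passing
 * BCH_FORCE_IF_DATA_DEGRADED substitutes data_replicas_required for
 * data_replicas in that calculation.
 */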
1103 | static bool bch2_fs_may_start(struct bch_fs *c) | |
1104 | { | |
1105 | struct replicas_status s; | |
1106 | struct bch_sb_field_members *mi; | |
1107 | struct bch_dev *ca; | |
1108 | unsigned i, flags = c->opts.degraded | |
1109 | ? BCH_FORCE_IF_DEGRADED | |
1110 | : 0; | |
1111 | ||
1112 | if (!c->opts.degraded) { | |
1113 | mutex_lock(&c->sb_lock); | |
1114 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1115 | ||
1116 | for (i = 0; i < c->disk_sb.sb->nr_devices; i++) { | |
1117 | if (!bch2_dev_exists(c->disk_sb.sb, mi, i)) | |
1118 | continue; | |
1119 | ||
1120 | ca = bch_dev_locked(c, i); | |
1121 | ||
1122 | if (!bch2_dev_is_online(ca) && | |
1123 | (ca->mi.state == BCH_MEMBER_STATE_RW || | |
1124 | ca->mi.state == BCH_MEMBER_STATE_RO)) { | |
1125 | mutex_unlock(&c->sb_lock); | |
1126 | return false; | |
1127 | } | |
1128 | } | |
1129 | mutex_unlock(&c->sb_lock); | |
1130 | } | |
1131 | ||
1132 | s = bch2_replicas_status(c); | |
1133 | ||
1134 | return bch2_have_enough_devs(s, flags); | |
1135 | } | |
1136 | ||
1137 | static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca) | |
1138 | { | |
1139 | bch2_copygc_stop(ca); | |
1140 | ||
1141 | /* | |
1142 | * The allocator thread itself allocates btree nodes, so stop it first: | |
1143 | */ | |
1144 | bch2_dev_allocator_stop(ca); | |
1145 | bch2_dev_allocator_remove(c, ca); | |
1146 | bch2_dev_journal_stop(&c->journal, ca); | |
1147 | } | |
1148 | ||
1149 | static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) | |
1150 | { | |
1151 | lockdep_assert_held(&c->state_lock); | |
1152 | ||
1153 | BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW); | |
1154 | ||
1155 | bch2_dev_allocator_add(c, ca); | |
1156 | bch2_recalc_capacity(c); | |
1157 | ||
1158 | if (bch2_dev_allocator_start(ca)) | |
1159 | return "error starting allocator thread"; | |
1160 | ||
1161 | if (bch2_copygc_start(c, ca)) | |
1162 | return "error starting copygc thread"; | |
1163 | ||
1164 | return NULL; | |
1165 | } | |
1166 | ||
1167 | int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, | |
1168 | enum bch_member_state new_state, int flags) | |
1169 | { | |
1170 | struct bch_sb_field_members *mi; | |
1171 | int ret = 0; | |
1172 | ||
1173 | if (ca->mi.state == new_state) | |
1174 | return 0; | |
1175 | ||
1176 | if (!bch2_dev_state_allowed(c, ca, new_state, flags)) | |
1177 | return -EINVAL; | |
1178 | ||
1179 | if (new_state != BCH_MEMBER_STATE_RW) | |
1180 | __bch2_dev_read_only(c, ca); | |
1181 | ||
1182 | bch_notice(ca, "%s", bch2_dev_state[new_state]); | |
1183 | ||
1184 | mutex_lock(&c->sb_lock); | |
1185 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1186 | SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state); | |
1187 | bch2_write_super(c); | |
1188 | mutex_unlock(&c->sb_lock); | |
1189 | ||
1190 | if (new_state == BCH_MEMBER_STATE_RW && | |
1191 | __bch2_dev_read_write(c, ca)) | |
1192 | ret = -ENOMEM; | |
1193 | ||
1194 | rebalance_wakeup(c); | |
1195 | ||
1196 | return ret; | |
1197 | } | |
1198 | ||
1199 | int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, | |
1200 | enum bch_member_state new_state, int flags) | |
1201 | { | |
1202 | int ret; | |
1203 | ||
1204 | mutex_lock(&c->state_lock); | |
1205 | ret = __bch2_dev_set_state(c, ca, new_state, flags); | |
1206 | mutex_unlock(&c->state_lock); | |
1207 | ||
1208 | return ret; | |
1209 | } | |
1210 | ||
1211 | /* Device add/removal: */ | |
1212 | ||
1213 | int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) | |
1214 | { | |
1215 | struct bch_sb_field_members *mi; | |
1216 | unsigned dev_idx = ca->dev_idx, data; | |
1217 | int ret = -EINVAL; | |
1218 | ||
1219 | mutex_lock(&c->state_lock); | |
1220 | ||
1221 | percpu_ref_put(&ca->ref); /* XXX */ | |
1222 | ||
1223 | if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { | |
1224 | bch_err(ca, "Cannot remove without losing data"); | |
1225 | goto err; | |
1226 | } | |
1227 | ||
1228 | __bch2_dev_read_only(c, ca); | |
1229 | ||
1230 | /* | |
1231 | * XXX: verify that dev_idx is really not in use anymore, anywhere | |
1232 | * | |
1233 | * flag_data_bad() does not check btree pointers | |
1234 | */ | |
1235 | ret = bch2_dev_data_drop(c, ca->dev_idx, flags); | |
1236 | if (ret) { | |
1237 | bch_err(ca, "Remove failed: error %i dropping data", ret); | |
1238 | goto err; | |
1239 | } | |
1240 | ||
1241 | ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx); | |
1242 | if (ret) { | |
1243 | bch_err(ca, "Remove failed: error %i flushing journal", ret); | |
1244 | goto err; | |
1245 | } | |
1246 | ||
1247 | data = bch2_dev_has_data(c, ca); | |
1248 | if (data) { | |
1249 | char data_has_str[100]; | |
319f9ac3 KO |
1250 | |
1251 | bch2_string_opt_to_text(&PBUF(data_has_str), | |
1252 | bch2_data_types, data); | |
1c6fdbd8 KO |
1253 | bch_err(ca, "Remove failed, still has data (%s)", data_has_str); |
1254 | ret = -EBUSY; | |
1255 | goto err; | |
1256 | } | |
1257 | ||
1258 | ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC, | |
1259 | POS(ca->dev_idx, 0), | |
1260 | POS(ca->dev_idx + 1, 0), | |
fc3268c1 | 1261 | NULL); |
1c6fdbd8 KO |
1262 | if (ret) { |
1263 | bch_err(ca, "Remove failed, error deleting alloc info"); | |
1264 | goto err; | |
1265 | } | |
1266 | ||
1267 | /* | |
1268 | * must flush all existing journal entries, they might have | |
1269 | * (overwritten) keys that point to the device we're removing: | |
1270 | */ | |
1271 | bch2_journal_flush_all_pins(&c->journal); | |
1272 | ret = bch2_journal_error(&c->journal); | |
1273 | if (ret) { | |
1274 | bch_err(ca, "Remove failed, journal error"); | |
1275 | goto err; | |
1276 | } | |
1277 | ||
1278 | __bch2_dev_offline(c, ca); | |
1279 | ||
1280 | mutex_lock(&c->sb_lock); | |
1281 | rcu_assign_pointer(c->devs[ca->dev_idx], NULL); | |
1282 | mutex_unlock(&c->sb_lock); | |
1283 | ||
1284 | percpu_ref_kill(&ca->ref); | |
1285 | wait_for_completion(&ca->ref_completion); | |
1286 | ||
1287 | bch2_dev_free(ca); | |
1288 | ||
1289 | /* | |
1290 | * Free this device's slot in the bch_member array - all pointers to | |
1291 | * this device must be gone: | |
1292 | */ | |
1293 | mutex_lock(&c->sb_lock); | |
1294 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1295 | memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid)); | |
1296 | ||
1297 | bch2_write_super(c); | |
1298 | ||
1299 | mutex_unlock(&c->sb_lock); | |
1300 | mutex_unlock(&c->state_lock); | |
1301 | return 0; | |
1302 | err: | |
1303 | if (ca->mi.state == BCH_MEMBER_STATE_RW) | |
1304 | __bch2_dev_read_write(c, ca); | |
1305 | mutex_unlock(&c->state_lock); | |
1306 | return ret; | |
1307 | } | |
1308 | ||
6eac2c2e KO |
1309 | static void dev_usage_clear(struct bch_dev *ca) |
1310 | { | |
1311 | struct bucket_array *buckets; | |
1312 | int cpu; | |
1313 | ||
1314 | for_each_possible_cpu(cpu) { | |
1315 | struct bch_dev_usage *p = | |
9ca53b55 | 1316 | per_cpu_ptr(ca->usage[0], cpu); |
6eac2c2e KO |
1317 | memset(p, 0, sizeof(*p)); |
1318 | } | |
1319 | ||
1320 | down_read(&ca->bucket_lock); | |
1321 | buckets = bucket_array(ca); | |
1322 | ||
1323 | memset(buckets->b, 0, sizeof(buckets->b[0]) * buckets->nbuckets); | |
1324 | up_read(&ca->bucket_lock); | |
1325 | } | |
1326 | ||
1c6fdbd8 KO |
1327 | /* Add new device to running filesystem: */ |
1328 | int bch2_dev_add(struct bch_fs *c, const char *path) | |
1329 | { | |
1330 | struct bch_opts opts = bch2_opts_empty(); | |
1331 | struct bch_sb_handle sb; | |
1332 | const char *err; | |
1333 | struct bch_dev *ca = NULL; | |
1334 | struct bch_sb_field_members *mi; | |
1335 | struct bch_member dev_mi; | |
1336 | unsigned dev_idx, nr_devices, u64s; | |
1337 | int ret; | |
1338 | ||
1339 | ret = bch2_read_super(path, &opts, &sb); | |
1340 | if (ret) | |
1341 | return ret; | |
1342 | ||
1343 | err = bch2_sb_validate(&sb); | |
1344 | if (err) | |
1345 | return -EINVAL; | |
1346 | ||
1347 | dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx]; | |
1348 | ||
1349 | err = bch2_dev_may_add(sb.sb, c); | |
1350 | if (err) | |
1351 | return -EINVAL; | |
1352 | ||
1353 | ca = __bch2_dev_alloc(c, &dev_mi); | |
1354 | if (!ca) { | |
1355 | bch2_free_super(&sb); | |
1356 | return -ENOMEM; | |
1357 | } | |
1358 | ||
1359 | ret = __bch2_dev_attach_bdev(ca, &sb); | |
1360 | if (ret) { | |
1361 | bch2_dev_free(ca); | |
1362 | return ret; | |
1363 | } | |
1364 | ||
6eac2c2e KO |
1365 | /* |
1366 | * We want to allocate journal on the new device before adding the new | |
1367 | * device to the filesystem because allocating after we attach requires | |
1368 | * spinning up the allocator thread, and the allocator thread requires | |
1369 | * doing btree writes, which if the existing devices are RO isn't going | |
1370 | * to work | |
1371 | * | |
1372 | * So we have to mark where the superblocks are, but marking allocated | |
1373 | * data normally updates the filesystem usage too, so we have to mark, | |
1374 | * allocate the journal, reset all the marks, then remark after we | |
1375 | * attach... | |
1376 | */ | |
9ca53b55 | 1377 | bch2_mark_dev_superblock(ca->fs, ca, 0); |
6eac2c2e | 1378 | |
1c6fdbd8 KO |
1379 | err = "journal alloc failed"; |
1380 | ret = bch2_dev_journal_alloc(ca); | |
1381 | if (ret) | |
1382 | goto err; | |
1383 | ||
6eac2c2e KO |
1384 | dev_usage_clear(ca); |
1385 | ||
1c6fdbd8 KO |
1386 | mutex_lock(&c->state_lock); |
1387 | mutex_lock(&c->sb_lock); | |
1388 | ||
1389 | err = "insufficient space in new superblock"; | |
1390 | ret = bch2_sb_from_fs(c, ca); | |
1391 | if (ret) | |
1392 | goto err_unlock; | |
1393 | ||
1394 | mi = bch2_sb_get_members(ca->disk_sb.sb); | |
1395 | ||
1396 | if (!bch2_sb_resize_members(&ca->disk_sb, | |
1397 | le32_to_cpu(mi->field.u64s) + | |
1398 | sizeof(dev_mi) / sizeof(u64))) { | |
1399 | ret = -ENOSPC; | |
1400 | goto err_unlock; | |
1401 | } | |
1402 | ||
1403 | if (dynamic_fault("bcachefs:add:no_slot")) | |
1404 | goto no_slot; | |
1405 | ||
1406 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1407 | for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) | |
1408 | if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx)) | |
1409 | goto have_slot; | |
1410 | no_slot: | |
1411 | err = "no slots available in superblock"; | |
1412 | ret = -ENOSPC; | |
1413 | goto err_unlock; | |
1414 | ||
1415 | have_slot: | |
1416 | nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices); | |
1417 | u64s = (sizeof(struct bch_sb_field_members) + | |
1418 | sizeof(struct bch_member) * nr_devices) / sizeof(u64); | |
1419 | ||
1420 | err = "no space in superblock for member info"; | |
1421 | ret = -ENOSPC; | |
1422 | ||
1423 | mi = bch2_sb_resize_members(&c->disk_sb, u64s); | |
1424 | if (!mi) | |
1425 | goto err_unlock; | |
1426 | ||
1427 | /* success: */ | |
1428 | ||
1429 | mi->members[dev_idx] = dev_mi; | |
a420eea6 | 1430 | mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_real_seconds()); |
1c6fdbd8 KO |
1431 | c->disk_sb.sb->nr_devices = nr_devices; |
1432 | ||
1433 | ca->disk_sb.sb->dev_idx = dev_idx; | |
1434 | bch2_dev_attach(c, ca, dev_idx); | |
1435 | ||
9ca53b55 | 1436 | bch2_mark_dev_superblock(c, ca, 0); |
6eac2c2e | 1437 | |
1c6fdbd8 KO |
1438 | bch2_write_super(c); |
1439 | mutex_unlock(&c->sb_lock); | |
1440 | ||
1441 | if (ca->mi.state == BCH_MEMBER_STATE_RW) { | |
1442 | err = __bch2_dev_read_write(c, ca); | |
1443 | if (err) | |
1444 | goto err_late; | |
1445 | } | |
1446 | ||
1447 | mutex_unlock(&c->state_lock); | |
1448 | return 0; | |
1449 | ||
1450 | err_unlock: | |
1451 | mutex_unlock(&c->sb_lock); | |
1452 | mutex_unlock(&c->state_lock); | |
1453 | err: | |
1454 | if (ca) | |
1455 | bch2_dev_free(ca); | |
1456 | bch2_free_super(&sb); | |
1457 | bch_err(c, "Unable to add device: %s", err); | |
1458 | return ret; | |
1459 | err_late: | |
1460 | bch_err(c, "Error going rw after adding device: %s", err); | |
1461 | return -EINVAL; | |
1462 | } | |
1463 | ||
1464 | /* Hot add existing device to running filesystem: */ | |
1465 | int bch2_dev_online(struct bch_fs *c, const char *path) | |
1466 | { | |
1467 | struct bch_opts opts = bch2_opts_empty(); | |
1468 | struct bch_sb_handle sb = { NULL }; | |
1469 | struct bch_sb_field_members *mi; | |
1470 | struct bch_dev *ca; | |
1471 | unsigned dev_idx; | |
1472 | const char *err; | |
1473 | int ret; | |
1474 | ||
1475 | mutex_lock(&c->state_lock); | |
1476 | ||
1477 | ret = bch2_read_super(path, &opts, &sb); | |
1478 | if (ret) { | |
1479 | mutex_unlock(&c->state_lock); | |
1480 | return ret; | |
1481 | } | |
1482 | ||
1483 | dev_idx = sb.sb->dev_idx; | |
1484 | ||
1485 | err = bch2_dev_in_fs(c->disk_sb.sb, sb.sb); | |
1486 | if (err) | |
1487 | goto err; | |
1488 | ||
1489 | if (bch2_dev_attach_bdev(c, &sb)) { | |
1490 | err = "bch2_dev_attach_bdev() error"; | |
1491 | goto err; | |
1492 | } | |
1493 | ||
1494 | ca = bch_dev_locked(c, dev_idx); | |
1495 | if (ca->mi.state == BCH_MEMBER_STATE_RW) { | |
1496 | err = __bch2_dev_read_write(c, ca); | |
1497 | if (err) | |
1498 | goto err; | |
1499 | } | |
1500 | ||
1501 | mutex_lock(&c->sb_lock); | |
1502 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1503 | ||
1504 | mi->members[ca->dev_idx].last_mount = | |
a420eea6 | 1505 | cpu_to_le64(ktime_get_real_seconds()); |
1c6fdbd8 KO |
1506 | |
1507 | bch2_write_super(c); | |
1508 | mutex_unlock(&c->sb_lock); | |
1509 | ||
1510 | mutex_unlock(&c->state_lock); | |
1511 | return 0; | |
1512 | err: | |
1513 | mutex_unlock(&c->state_lock); | |
1514 | bch2_free_super(&sb); | |
1515 | bch_err(c, "error bringing %s online: %s", path, err); | |
1516 | return -EINVAL; | |
1517 | } | |
1518 | ||
1519 | int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) | |
1520 | { | |
1521 | mutex_lock(&c->state_lock); | |
1522 | ||
1523 | if (!bch2_dev_is_online(ca)) { | |
1524 | bch_err(ca, "Already offline"); | |
1525 | mutex_unlock(&c->state_lock); | |
1526 | return 0; | |
1527 | } | |
1528 | ||
1529 | if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { | |
1530 | bch_err(ca, "Cannot offline required disk"); | |
1531 | mutex_unlock(&c->state_lock); | |
1532 | return -EINVAL; | |
1533 | } | |
1534 | ||
1535 | __bch2_dev_offline(c, ca); | |
1536 | ||
1537 | mutex_unlock(&c->state_lock); | |
1538 | return 0; | |
1539 | } | |
1540 | ||
1541 | int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) | |
1542 | { | |
1543 | struct bch_member *mi; | |
1544 | int ret = 0; | |
1545 | ||
1546 | mutex_lock(&c->state_lock); | |
1547 | ||
1548 | if (nbuckets < ca->mi.nbuckets) { | |
1549 | bch_err(ca, "Cannot shrink yet"); | |
1550 | ret = -EINVAL; | |
1551 | goto err; | |
1552 | } | |
1553 | ||
1554 | if (bch2_dev_is_online(ca) && | |
1555 | get_capacity(ca->disk_sb.bdev->bd_disk) < | |
1556 | ca->mi.bucket_size * nbuckets) { | |
1557 | bch_err(ca, "New size larger than device"); | |
1558 | ret = -EINVAL; | |
1559 | goto err; | |
1560 | } | |
1561 | ||
1562 | ret = bch2_dev_buckets_resize(c, ca, nbuckets); | |
1563 | if (ret) { | |
1564 | bch_err(ca, "Resize error: %i", ret); | |
1565 | goto err; | |
1566 | } | |
1567 | ||
1568 | mutex_lock(&c->sb_lock); | |
1569 | mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx]; | |
1570 | mi->nbuckets = cpu_to_le64(nbuckets); | |
1571 | ||
1572 | bch2_write_super(c); | |
1573 | mutex_unlock(&c->sb_lock); | |
1574 | ||
1575 | bch2_recalc_capacity(c); | |
1576 | err: | |
1577 | mutex_unlock(&c->state_lock); | |
1578 | return ret; | |
1579 | } | |
1580 | ||
1581 | /* return with ref on ca->ref: */ | |
1582 | struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path) | |
1583 | { | |
1584 | ||
1585 | struct bch_dev *ca; | |
1586 | dev_t dev; | |
1587 | unsigned i; | |
1588 | int ret; | |
1589 | ||
1590 | ret = lookup_bdev(path, &dev); | |
1591 | if (ret) | |
1592 | return ERR_PTR(ret); | |
1593 | ||
1594 | for_each_member_device(ca, c, i) | |
1595 | if (ca->disk_sb.bdev->bd_dev == dev) | |
1596 | goto found; | |
1597 | ||
1598 | ca = ERR_PTR(-ENOENT); | |
1599 | found: | |
1600 | return ca; | |
1601 | } | |
1602 | ||
1603 | /* Filesystem open: */ | |
1604 | ||
1605 | struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, | |
1606 | struct bch_opts opts) | |
1607 | { | |
1608 | struct bch_sb_handle *sb = NULL; | |
1609 | struct bch_fs *c = NULL; | |
1610 | unsigned i, best_sb = 0; | |
1611 | const char *err; | |
1612 | int ret = -ENOMEM; | |
1613 | ||
1614 | pr_verbose_init(opts, ""); | |
1615 | ||
1616 | if (!nr_devices) { | |
1617 | c = ERR_PTR(-EINVAL); | |
1618 | goto out2; | |
1619 | } | |
1620 | ||
1621 | if (!try_module_get(THIS_MODULE)) { | |
1622 | c = ERR_PTR(-ENODEV); | |
1623 | goto out2; | |
1624 | } | |
1625 | ||
1626 | sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL); | |
1627 | if (!sb) | |
1628 | goto err; | |
1629 | ||
1630 | for (i = 0; i < nr_devices; i++) { | |
1631 | ret = bch2_read_super(devices[i], &opts, &sb[i]); | |
1632 | if (ret) | |
1633 | goto err; | |
1634 | ||
1635 | err = bch2_sb_validate(&sb[i]); | |
1636 | if (err) | |
1637 | goto err_print; | |
1638 | } | |
1639 | ||
1640 | for (i = 1; i < nr_devices; i++) | |
1641 | if (le64_to_cpu(sb[i].sb->seq) > | |
1642 | le64_to_cpu(sb[best_sb].sb->seq)) | |
1643 | best_sb = i; | |
1644 | ||
1645 | for (i = 0; i < nr_devices; i++) { | |
1646 | err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb); | |
1647 | if (err) | |
1648 | goto err_print; | |
1649 | } | |
1650 | ||
1651 | ret = -ENOMEM; | |
1652 | c = bch2_fs_alloc(sb[best_sb].sb, opts); | |
1653 | if (!c) | |
1654 | goto err; | |
1655 | ||
1656 | err = "bch2_dev_online() error"; | |
1657 | mutex_lock(&c->state_lock); | |
1658 | for (i = 0; i < nr_devices; i++) | |
1659 | if (bch2_dev_attach_bdev(c, &sb[i])) { | |
1660 | mutex_unlock(&c->state_lock); | |
1661 | goto err_print; | |
1662 | } | |
1663 | mutex_unlock(&c->state_lock); | |
1664 | ||
1665 | err = "insufficient devices"; | |
1666 | if (!bch2_fs_may_start(c)) | |
1667 | goto err_print; | |
1668 | ||
1669 | if (!c->opts.nostart) { | |
1670 | err = bch2_fs_start(c); | |
1671 | if (err) | |
1672 | goto err_print; | |
1673 | } | |
1674 | out: | |
1675 | kfree(sb); | |
1676 | module_put(THIS_MODULE); | |
1677 | out2: | |
1678 | pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c)); | |
1679 | return c; | |
1680 | err_print: | |
1681 | pr_err("bch_fs_open err opening %s: %s", | |
1682 | devices[0], err); | |
1683 | ret = -EINVAL; | |
1684 | err: | |
1685 | if (c) | |
1686 | bch2_fs_stop(c); | |
1687 | for (i = 0; i < nr_devices; i++) | |
1688 | bch2_free_super(&sb[i]); | |
1689 | c = ERR_PTR(ret); | |
1690 | goto out; | |
1691 | } | |
1692 | ||
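/*
 * Illustrative sketch of opening a multi-device filesystem with this
 * interface; the device paths and helper below are hypothetical. The caller
 * checks IS_ERR() on the result and eventually calls bch2_fs_stop():
 */
static struct bch_fs *example_open_two_devices(void)
{
	static char *devs[] = { "/dev/sda", "/dev/sdb" };
	struct bch_opts opts = bch2_opts_empty();

	return bch2_fs_open(devs, ARRAY_SIZE(devs), opts);
}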
1693 | static const char *__bch2_fs_open_incremental(struct bch_sb_handle *sb, | |
1694 | struct bch_opts opts) | |
1695 | { | |
1696 | const char *err; | |
1697 | struct bch_fs *c; | |
1698 | bool allocated_fs = false; | |
1699 | ||
1700 | err = bch2_sb_validate(sb); | |
1701 | if (err) | |
1702 | return err; | |
1703 | ||
1704 | mutex_lock(&bch_fs_list_lock); | |
1705 | c = __bch2_uuid_to_fs(sb->sb->uuid); | |
1706 | if (c) { | |
1707 | closure_get(&c->cl); | |
1708 | ||
1709 | err = bch2_dev_in_fs(c->disk_sb.sb, sb->sb); | |
1710 | if (err) | |
1711 | goto err; | |
1712 | } else { | |
1713 | c = bch2_fs_alloc(sb->sb, opts); | |
1714 | err = "cannot allocate memory"; | |
1715 | if (!c) | |
1716 | goto err; | |
1717 | ||
1718 | allocated_fs = true; | |
1719 | } | |
1720 | ||
1721 | err = "bch2_dev_online() error"; | |
1722 | ||
1723 | mutex_lock(&c->sb_lock); | |
1724 | if (bch2_dev_attach_bdev(c, sb)) { | |
1725 | mutex_unlock(&c->sb_lock); | |
1726 | goto err; | |
1727 | } | |
1728 | mutex_unlock(&c->sb_lock); | |
1729 | ||
1730 | if (!c->opts.nostart && bch2_fs_may_start(c)) { | |
1731 | err = bch2_fs_start(c); | |
1732 | if (err) | |
1733 | goto err; | |
1734 | } | |
1735 | ||
1736 | closure_put(&c->cl); | |
1737 | mutex_unlock(&bch_fs_list_lock); | |
1738 | ||
1739 | return NULL; | |
1740 | err: | |
1741 | mutex_unlock(&bch_fs_list_lock); | |
1742 | ||
1743 | if (allocated_fs) | |
1744 | bch2_fs_stop(c); | |
1745 | else if (c) | |
1746 | closure_put(&c->cl); | |
1747 | ||
1748 | return err; | |
1749 | } | |
1750 | ||
1751 | const char *bch2_fs_open_incremental(const char *path) | |
1752 | { | |
1753 | struct bch_sb_handle sb; | |
1754 | struct bch_opts opts = bch2_opts_empty(); | |
1755 | const char *err; | |
1756 | ||
1757 | if (bch2_read_super(path, &opts, &sb)) | |
1758 | return "error reading superblock"; | |
1759 | ||
1760 | err = __bch2_fs_open_incremental(&sb, opts); | |
1761 | bch2_free_super(&sb); | |
1762 | ||
1763 | return err; | |
1764 | } | |
1765 | ||
1766 | /* Global interfaces/init */ | |
1767 | ||
1768 | static void bcachefs_exit(void) | |
1769 | { | |
1770 | bch2_debug_exit(); | |
1771 | bch2_vfs_exit(); | |
1772 | bch2_chardev_exit(); | |
1773 | if (bcachefs_kset) | |
1774 | kset_unregister(bcachefs_kset); | |
1775 | } | |
1776 | ||
1777 | static int __init bcachefs_init(void) | |
1778 | { | |
1779 | bch2_bkey_pack_test(); | |
1780 | bch2_inode_pack_test(); | |
1781 | ||
1782 | if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) || | |
1783 | bch2_chardev_init() || | |
1784 | bch2_vfs_init() || | |
1785 | bch2_debug_init()) | |
1786 | goto err; | |
1787 | ||
1788 | return 0; | |
1789 | err: | |
1790 | bcachefs_exit(); | |
1791 | return -ENOMEM; | |
1792 | } | |
1793 | ||
1794 | #define BCH_DEBUG_PARAM(name, description) \ | |
1795 | bool bch2_##name; \ | |
1796 | module_param_named(name, bch2_##name, bool, 0644); \ | |
1797 | MODULE_PARM_DESC(name, description); | |
1798 | BCH_DEBUG_PARAMS() | |
1799 | #undef BCH_DEBUG_PARAM | |
1800 | ||
1801 | unsigned bch2_metadata_version = BCH_SB_VERSION_MAX; | |
1802 | module_param_named(version, bch2_metadata_version, uint, 0400); | |
1803 | ||
1804 | module_exit(bcachefs_exit); | |
1805 | module_init(bcachefs_init); |