Commit | Line | Data |
---|---|---|
1c6fdbd8 KO |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * bcachefs setup/teardown code, and some metadata io - read a superblock and | |
4 | * figure out what to do with it. | |
5 | * | |
6 | * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> | |
7 | * Copyright 2012 Google, Inc. | |
8 | */ | |
9 | ||
10 | #include "bcachefs.h" | |
7b3f84ea KO |
11 | #include "alloc_background.h" |
12 | #include "alloc_foreground.h" | |
1c6fdbd8 KO |
13 | #include "btree_cache.h" |
14 | #include "btree_gc.h" | |
15 | #include "btree_update_interior.h" | |
16 | #include "btree_io.h" | |
17 | #include "chardev.h" | |
18 | #include "checksum.h" | |
19 | #include "clock.h" | |
20 | #include "compress.h" | |
21 | #include "debug.h" | |
22 | #include "disk_groups.h" | |
23 | #include "error.h" | |
24 | #include "fs.h" | |
25 | #include "fs-io.h" | |
26 | #include "fsck.h" | |
27 | #include "inode.h" | |
28 | #include "io.h" | |
29 | #include "journal.h" | |
30 | #include "journal_reclaim.h" | |
31 | #include "move.h" | |
32 | #include "migrate.h" | |
33 | #include "movinggc.h" | |
34 | #include "quota.h" | |
35 | #include "rebalance.h" | |
36 | #include "recovery.h" | |
37 | #include "replicas.h" | |
38 | #include "super.h" | |
39 | #include "super-io.h" | |
40 | #include "sysfs.h" | |
41 | #include "trace.h" | |
42 | ||
43 | #include <linux/backing-dev.h> | |
44 | #include <linux/blkdev.h> | |
45 | #include <linux/debugfs.h> | |
46 | #include <linux/device.h> | |
47 | #include <linux/idr.h> | |
48 | #include <linux/kthread.h> | |
49 | #include <linux/module.h> | |
50 | #include <linux/percpu.h> | |
51 | #include <linux/random.h> | |
52 | #include <linux/sysfs.h> | |
53 | #include <crypto/hash.h> | |
54 | ||
55 | MODULE_LICENSE("GPL"); | |
56 | MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); | |
57 | ||
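/*
 * KTYPE(type) stamps out the sysfs boilerplate for one kobject type: an
 * attribute_group wrapping type##_files, a NULL-terminated type##_groups
 * array, and a kobj_type wiring up type##_release and type##_sysfs_ops.
 * The attribute arrays and sysfs_ops it references are defined in the
 * sysfs code; e.g. KTYPE(bch2_fs) below produces bch2_fs_ktype from
 * bch2_fs_files, bch2_fs_sysfs_ops and bch2_fs_release.
 */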
58 | #define KTYPE(type) \ | |
59 | static const struct attribute_group type ## _group = { \ | |
60 | .attrs = type ## _files \ | |
61 | }; \ | |
62 | \ | |
63 | static const struct attribute_group *type ## _groups[] = { \ | |
64 | &type ## _group, \ | |
65 | NULL \ | |
66 | }; \ | |
67 | \ | |
68 | static const struct kobj_type type ## _ktype = { \ | |
69 | .release = type ## _release, \ | |
70 | .sysfs_ops = &type ## _sysfs_ops, \ | |
71 | .default_groups = type ## _groups \ | |
72 | } | |
73 | ||
74 | static void bch2_fs_release(struct kobject *); | |
75 | static void bch2_dev_release(struct kobject *); | |
76 | ||
77 | static void bch2_fs_internal_release(struct kobject *k) | |
78 | { | |
79 | } | |
80 | ||
81 | static void bch2_fs_opts_dir_release(struct kobject *k) | |
82 | { | |
83 | } | |
84 | ||
85 | static void bch2_fs_time_stats_release(struct kobject *k) | |
86 | { | |
87 | } | |
88 | ||
89 | KTYPE(bch2_fs); | |
90 | KTYPE(bch2_fs_internal); | |
91 | KTYPE(bch2_fs_opts_dir); | |
92 | KTYPE(bch2_fs_time_stats); | |
93 | KTYPE(bch2_dev); | |
94 | ||
95 | static struct kset *bcachefs_kset; | |
96 | static LIST_HEAD(bch_fs_list); | |
97 | static DEFINE_MUTEX(bch_fs_list_lock); | |
98 | ||
99 | static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait); | |
100 | ||
101 | static void bch2_dev_free(struct bch_dev *); | |
102 | static int bch2_dev_alloc(struct bch_fs *, unsigned); | |
103 | static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *); | |
104 | static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *); | |
105 | ||
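/*
 * Look up the filesystem a block device belongs to, by device number.
 * On success a ref on the filesystem's closure is taken; the caller is
 * responsible for dropping it with closure_put(&c->cl).
 */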
106 | struct bch_fs *bch2_dev_to_fs(dev_t dev) | |
107 | { | |
108 | struct bch_fs *c; | |
109 | struct bch_dev *ca; | |
110 | unsigned i; | |
111 | ||
112 | mutex_lock(&bch_fs_list_lock); | |
113 | rcu_read_lock(); | |
114 | ||
115 | list_for_each_entry(c, &bch_fs_list, list) | |
116 | for_each_member_device_rcu(ca, c, i, NULL) | |
117 | if (ca->disk_sb.bdev->bd_dev == dev) { | |
118 | closure_get(&c->cl); | |
119 | goto found; | |
120 | } | |
121 | c = NULL; | |
122 | found: | |
123 | rcu_read_unlock(); | |
124 | mutex_unlock(&bch_fs_list_lock); | |
125 | ||
126 | return c; | |
127 | } | |
128 | ||
129 | static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid) | |
130 | { | |
131 | struct bch_fs *c; | |
132 | ||
133 | lockdep_assert_held(&bch_fs_list_lock); | |
134 | ||
135 | list_for_each_entry(c, &bch_fs_list, list) | |
136 | if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid))) | |
137 | return c; | |
138 | ||
139 | return NULL; | |
140 | } | |
141 | ||
142 | struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid) | |
143 | { | |
144 | struct bch_fs *c; | |
145 | ||
146 | mutex_lock(&bch_fs_list_lock); | |
147 | c = __bch2_uuid_to_fs(uuid); | |
148 | if (c) | |
149 | closure_get(&c->cl); | |
150 | mutex_unlock(&bch_fs_list_lock); | |
151 | ||
152 | return c; | |
153 | } | |
154 | ||
155 | /* Filesystem RO/RW: */ | |
156 | ||
157 | /* | |
158 | * For startup/shutdown of RW stuff, the dependencies are: | |
159 | * | |
160 | * - foreground writes depend on copygc and rebalance (to free up space) | |
161 | * | |
162 | * - copygc and rebalance depend on mark and sweep gc (they actually probably | |
163 | * don't because they either reserve ahead of time or don't block if | |
164 | * allocations fail, but allocations can require mark and sweep gc to run | |
165 | * because of generation number wraparound) | |
166 | * | |
167 | * - all of the above depends on the allocator threads | |
168 | * | |
169 | * - allocator depends on the journal (when it rewrites prios and gens) | |
170 | */ | |
171 | ||
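/*
 * Tear down the write side in roughly reverse dependency order: rebalance
 * and copygc first, then the gc thread, then (after flushing the journal)
 * the per-device allocator threads, and finally the journal itself.
 */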
172 | static void __bch2_fs_read_only(struct bch_fs *c) | |
173 | { | |
174 | struct bch_dev *ca; | |
175 | unsigned i; | |
176 | ||
177 | bch2_rebalance_stop(c); | |
178 | ||
179 | for_each_member_device(ca, c, i) | |
180 | bch2_copygc_stop(ca); | |
181 | ||
182 | bch2_gc_thread_stop(c); | |
183 | ||
184 | /* | |
185 | * Flush journal before stopping allocators, because flushing journal | |
186 | * blacklist entries involves allocating new btree nodes: | |
187 | */ | |
188 | bch2_journal_flush_all_pins(&c->journal); | |
189 | ||
190 | for_each_member_device(ca, c, i) | |
191 | bch2_dev_allocator_stop(ca); | |
192 | ||
193 | bch2_journal_flush_all_pins(&c->journal); | |
194 | ||
195 | /* | |
196 | * We need to explicitly wait on btree interior updates to complete | |
197 | * before stopping the journal, flushing all journal pins isn't | |
198 | * sufficient, because in the BTREE_INTERIOR_UPDATING_ROOT case btree | |
199 | * interior updates have to drop their journal pin before they're | |
200 | * fully complete: | |
201 | */ | |
202 | closure_wait_event(&c->btree_interior_update_wait, | |
203 | !bch2_btree_interior_updates_nr_pending(c)); | |
204 | ||
205 | bch2_fs_journal_stop(&c->journal); | |
206 | ||
207 | /* | |
208 | * the journal kicks off btree writes via reclaim - wait for in-flight |
209 | * writes after stopping the journal: |
210 | */ | |
211 | if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) | |
212 | bch2_btree_flush_all_writes(c); | |
213 | else | |
214 | bch2_btree_verify_flushed(c); | |
215 | ||
216 | /* | |
217 | * After stopping journal: | |
218 | */ | |
219 | for_each_member_device(ca, c, i) | |
220 | bch2_dev_allocator_remove(c, ca); | |
221 | } | |
222 | ||
223 | static void bch2_writes_disabled(struct percpu_ref *writes) | |
224 | { | |
225 | struct bch_fs *c = container_of(writes, struct bch_fs, writes); | |
226 | ||
227 | set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); | |
228 | wake_up(&bch_read_only_wait); | |
229 | } | |
230 | ||
231 | void bch2_fs_read_only(struct bch_fs *c) | |
232 | { | |
233 | if (c->state == BCH_FS_RO) | |
234 | return; | |
235 | ||
236 | BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); | |
237 | ||
238 | /* | |
239 | * Block new foreground-end write operations from starting - any new | |
240 | * writes will return -EROFS: | |
241 | * | |
242 | * (This is really blocking new _allocations_; writes to previously |
243 | * allocated space can still happen until stopping the allocator in | |
244 | * bch2_dev_allocator_stop()). | |
245 | */ | |
246 | percpu_ref_kill(&c->writes); | |
247 | ||
248 | cancel_delayed_work(&c->pd_controllers_update); | |
249 | ||
250 | /* | |
251 | * If we're not doing an emergency shutdown, we want to wait on | |
252 | * outstanding writes to complete so they don't see spurious errors due | |
253 | * to shutting down the allocator: | |
254 | * | |
255 | * If we are doing an emergency shutdown, outstanding writes may |
256 | * hang until we shut down the allocator, so we don't want to wait |
257 | * on outstanding writes before shutting everything down - but | |
258 | * we do need to wait on them before returning and signalling | |
259 | * that going RO is complete: | |
260 | */ | |
261 | wait_event(bch_read_only_wait, | |
262 | test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) || | |
263 | test_bit(BCH_FS_EMERGENCY_RO, &c->flags)); | |
264 | ||
265 | __bch2_fs_read_only(c); | |
266 | ||
267 | wait_event(bch_read_only_wait, | |
268 | test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); | |
269 | ||
270 | clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); | |
271 | ||
272 | if (!bch2_journal_error(&c->journal) && | |
273 | !test_bit(BCH_FS_ERROR, &c->flags) && | |
274 | !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) | |
275 | bch2_fs_mark_clean(c, true); | |
276 | ||
277 | if (c->state != BCH_FS_STOPPING) | |
278 | c->state = BCH_FS_RO; | |
279 | } | |
280 | ||
281 | static void bch2_fs_read_only_work(struct work_struct *work) | |
282 | { | |
283 | struct bch_fs *c = | |
284 | container_of(work, struct bch_fs, read_only_work); | |
285 | ||
286 | mutex_lock(&c->state_lock); | |
287 | bch2_fs_read_only(c); | |
288 | mutex_unlock(&c->state_lock); | |
289 | } | |
290 | ||
291 | static void bch2_fs_read_only_async(struct bch_fs *c) | |
292 | { | |
293 | queue_work(system_long_wq, &c->read_only_work); | |
294 | } | |
295 | ||
296 | bool bch2_fs_emergency_read_only(struct bch_fs *c) | |
297 | { | |
298 | bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags); | |
299 | ||
300 | bch2_fs_read_only_async(c); | |
301 | bch2_journal_halt(&c->journal); | |
302 | ||
303 | wake_up(&bch_read_only_wait); | |
304 | return ret; | |
305 | } | |
306 | ||
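/*
 * Transition to read-write: add RW members to the allocator, start the
 * per-device allocator threads, then the btree GC, copygc and rebalance
 * threads. Returns NULL on success or an error string; on failure the
 * partial startup is unwound via __bch2_fs_read_only().
 */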
307 | const char *bch2_fs_read_write(struct bch_fs *c) | |
308 | { | |
309 | struct bch_dev *ca; | |
310 | const char *err = NULL; | |
311 | unsigned i; | |
312 | ||
313 | if (c->state == BCH_FS_RW) | |
314 | return NULL; | |
315 | ||
316 | bch2_fs_mark_clean(c, false); | |
317 | ||
318 | for_each_rw_member(ca, c, i) | |
319 | bch2_dev_allocator_add(c, ca); | |
320 | bch2_recalc_capacity(c); | |
321 | ||
322 | err = "error starting allocator thread"; | |
323 | for_each_rw_member(ca, c, i) | |
324 | if (bch2_dev_allocator_start(ca)) { | |
325 | percpu_ref_put(&ca->io_ref); | |
326 | goto err; | |
327 | } | |
328 | ||
329 | err = "error starting btree GC thread"; | |
330 | if (bch2_gc_thread_start(c)) | |
331 | goto err; | |
332 | ||
333 | err = "error starting copygc thread"; | |
334 | for_each_rw_member(ca, c, i) | |
335 | if (bch2_copygc_start(c, ca)) { | |
336 | percpu_ref_put(&ca->io_ref); | |
337 | goto err; | |
338 | } | |
339 | ||
340 | err = "error starting rebalance thread"; | |
341 | if (bch2_rebalance_start(c)) | |
342 | goto err; | |
343 | ||
344 | schedule_delayed_work(&c->pd_controllers_update, 5 * HZ); | |
345 | ||
346 | if (c->state != BCH_FS_STARTING) | |
347 | percpu_ref_reinit(&c->writes); | |
348 | ||
349 | c->state = BCH_FS_RW; | |
350 | return NULL; | |
351 | err: | |
352 | __bch2_fs_read_only(c); | |
353 | return err; | |
354 | } | |
355 | ||
356 | /* Filesystem startup/shutdown: */ | |
357 | ||
358 | static void bch2_fs_free(struct bch_fs *c) | |
359 | { | |
360 | unsigned i; | |
361 | ||
362 | for (i = 0; i < BCH_TIME_STAT_NR; i++) | |
363 | bch2_time_stats_exit(&c->times[i]); | |
364 | ||
365 | bch2_fs_quota_exit(c); | |
366 | bch2_fs_fsio_exit(c); | |
367 | bch2_fs_encryption_exit(c); | |
368 | bch2_fs_io_exit(c); | |
369 | bch2_fs_btree_cache_exit(c); | |
370 | bch2_fs_journal_exit(&c->journal); | |
371 | bch2_io_clock_exit(&c->io_clock[WRITE]); | |
372 | bch2_io_clock_exit(&c->io_clock[READ]); | |
373 | bch2_fs_compress_exit(c); | |
374 | percpu_free_rwsem(&c->usage_lock); | |
375 | free_percpu(c->usage_percpu); | |
581edb63 KO |
376 | mempool_exit(&c->btree_iters_pool); |
1c6fdbd8 KO |
377 | mempool_exit(&c->btree_bounce_pool); |
378 | bioset_exit(&c->btree_bio); | |
379 | mempool_exit(&c->btree_interior_update_pool); | |
380 | mempool_exit(&c->btree_reserve_pool); | |
381 | mempool_exit(&c->fill_iter); | |
382 | percpu_ref_exit(&c->writes); | |
383 | kfree(rcu_dereference_protected(c->replicas, 1)); | |
384 | kfree(rcu_dereference_protected(c->disk_groups, 1)); | |
385 | ||
386 | if (c->copygc_wq) | |
387 | destroy_workqueue(c->copygc_wq); | |
388 | if (c->wq) | |
389 | destroy_workqueue(c->wq); | |
390 | ||
391 | free_pages((unsigned long) c->disk_sb.sb, | |
392 | c->disk_sb.page_order); | |
393 | kvpfree(c, sizeof(*c)); | |
394 | module_put(THIS_MODULE); | |
395 | } | |
396 | ||
397 | static void bch2_fs_release(struct kobject *kobj) | |
398 | { | |
399 | struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); | |
400 | ||
401 | bch2_fs_free(c); | |
402 | } | |
403 | ||
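/*
 * Full shutdown: remove sysfs objects, drop the filesystem from
 * bch_fs_list, wait for the main closure to drain, go read-only, cancel
 * outstanding work, then free each member device. The final
 * kobject_put() frees the bch_fs itself via bch2_fs_release().
 */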
404 | void bch2_fs_stop(struct bch_fs *c) | |
405 | { | |
406 | struct bch_dev *ca; | |
407 | unsigned i; | |
408 | ||
af1c6871 KO |
409 | bch_verbose(c, "shutting down"); |
410 | ||
1c6fdbd8 KO |
411 | for_each_member_device(ca, c, i) |
412 | if (ca->kobj.state_in_sysfs && | |
413 | ca->disk_sb.bdev) | |
414 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
415 | ||
416 | if (c->kobj.state_in_sysfs) | |
417 | kobject_del(&c->kobj); | |
418 | ||
419 | bch2_fs_debug_exit(c); | |
420 | bch2_fs_chardev_exit(c); | |
421 | ||
422 | kobject_put(&c->time_stats); | |
423 | kobject_put(&c->opts_dir); | |
424 | kobject_put(&c->internal); | |
425 | ||
426 | mutex_lock(&bch_fs_list_lock); | |
427 | list_del(&c->list); | |
428 | mutex_unlock(&bch_fs_list_lock); | |
429 | ||
430 | closure_sync(&c->cl); | |
431 | closure_debug_destroy(&c->cl); | |
432 | ||
433 | mutex_lock(&c->state_lock); | |
434 | bch2_fs_read_only(c); | |
435 | mutex_unlock(&c->state_lock); | |
436 | ||
437 | /* btree prefetch might have kicked off reads in the background: */ | |
438 | bch2_btree_flush_all_reads(c); | |
439 | ||
440 | for_each_member_device(ca, c, i) | |
441 | cancel_work_sync(&ca->io_error_work); | |
442 | ||
443 | cancel_work_sync(&c->btree_write_error_work); | |
444 | cancel_delayed_work_sync(&c->pd_controllers_update); | |
445 | cancel_work_sync(&c->read_only_work); | |
446 | ||
447 | for (i = 0; i < c->sb.nr_devices; i++) | |
448 | if (c->devs[i]) | |
449 | bch2_dev_free(rcu_dereference_protected(c->devs[i], 1)); | |
450 | ||
af1c6871 KO |
451 | bch_verbose(c, "shutdown complete"); |
452 | ||
1c6fdbd8 KO |
453 | kobject_put(&c->kobj); |
454 | } | |
455 | ||
456 | static const char *bch2_fs_online(struct bch_fs *c) | |
457 | { | |
458 | struct bch_dev *ca; | |
459 | const char *err = NULL; | |
460 | unsigned i; | |
461 | int ret; | |
462 | ||
463 | lockdep_assert_held(&bch_fs_list_lock); | |
464 | ||
465 | if (!list_empty(&c->list)) | |
466 | return NULL; | |
467 | ||
468 | if (__bch2_uuid_to_fs(c->sb.uuid)) | |
469 | return "filesystem UUID already open"; | |
470 | ||
471 | ret = bch2_fs_chardev_init(c); | |
472 | if (ret) | |
473 | return "error creating character device"; | |
474 | ||
475 | bch2_fs_debug_init(c); | |
476 | ||
477 | if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) || | |
478 | kobject_add(&c->internal, &c->kobj, "internal") || | |
479 | kobject_add(&c->opts_dir, &c->kobj, "options") || | |
480 | kobject_add(&c->time_stats, &c->kobj, "time_stats") || | |
481 | bch2_opts_create_sysfs_files(&c->opts_dir)) | |
482 | return "error creating sysfs objects"; | |
483 | ||
484 | mutex_lock(&c->state_lock); | |
485 | ||
486 | err = "error creating sysfs objects"; | |
487 | __for_each_member_device(ca, c, i, NULL) | |
488 | if (bch2_dev_sysfs_online(c, ca)) | |
489 | goto err; | |
490 | ||
491 | list_add(&c->list, &bch_fs_list); | |
492 | err = NULL; | |
493 | err: | |
494 | mutex_unlock(&c->state_lock); | |
495 | return err; | |
496 | } | |
497 | ||
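/*
 * Allocate and initialize a bch_fs from an on-disk superblock: set up
 * locks, work items and mempools, apply mount options, and allocate a
 * bch_dev for every member device. No threads are started and no
 * recovery is done here - that happens later, in bch2_fs_start().
 */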
498 | static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) | |
499 | { | |
500 | struct bch_sb_field_members *mi; | |
501 | struct bch_fs *c; | |
502 | unsigned i, iter_size; | |
503 | const char *err; | |
504 | ||
505 | pr_verbose_init(opts, ""); | |
506 | ||
507 | c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO); | |
508 | if (!c) | |
509 | goto out; | |
510 | ||
511 | __module_get(THIS_MODULE); | |
512 | ||
513 | c->minor = -1; | |
514 | c->disk_sb.fs_sb = true; | |
515 | ||
516 | mutex_init(&c->state_lock); | |
517 | mutex_init(&c->sb_lock); | |
518 | mutex_init(&c->replicas_gc_lock); | |
519 | mutex_init(&c->btree_root_lock); | |
520 | INIT_WORK(&c->read_only_work, bch2_fs_read_only_work); | |
521 | ||
522 | init_rwsem(&c->gc_lock); | |
523 | ||
524 | for (i = 0; i < BCH_TIME_STAT_NR; i++) | |
525 | bch2_time_stats_init(&c->times[i]); | |
526 | ||
b092dadd KO |
527 | bch2_fs_allocator_background_init(c); |
528 | bch2_fs_allocator_foreground_init(c); | |
1c6fdbd8 KO |
529 | bch2_fs_rebalance_init(c); |
530 | bch2_fs_quota_init(c); | |
531 | ||
532 | INIT_LIST_HEAD(&c->list); | |
533 | ||
534 | INIT_LIST_HEAD(&c->btree_interior_update_list); | |
535 | mutex_init(&c->btree_reserve_cache_lock); | |
536 | mutex_init(&c->btree_interior_update_lock); | |
537 | ||
538 | mutex_init(&c->bio_bounce_pages_lock); | |
539 | ||
540 | bio_list_init(&c->btree_write_error_list); | |
541 | spin_lock_init(&c->btree_write_error_lock); | |
542 | INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work); | |
543 | ||
544 | INIT_LIST_HEAD(&c->fsck_errors); | |
545 | mutex_init(&c->fsck_error_lock); | |
546 | ||
547 | seqcount_init(&c->gc_pos_lock); | |
548 | ||
549 | c->copy_gc_enabled = 1; | |
550 | c->rebalance.enabled = 1; | |
551 | c->promote_whole_extents = true; | |
552 | ||
553 | c->journal.write_time = &c->times[BCH_TIME_journal_write]; | |
554 | c->journal.delay_time = &c->times[BCH_TIME_journal_delay]; | |
555 | c->journal.blocked_time = &c->times[BCH_TIME_journal_blocked]; | |
556 | c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq]; | |
557 | ||
558 | bch2_fs_btree_cache_init_early(&c->btree_cache); | |
559 | ||
560 | mutex_lock(&c->sb_lock); | |
561 | ||
562 | if (bch2_sb_to_fs(c, sb)) { | |
563 | mutex_unlock(&c->sb_lock); | |
564 | goto err; | |
565 | } | |
566 | ||
567 | mutex_unlock(&c->sb_lock); | |
568 | ||
569 | scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid); | |
570 | ||
571 | c->opts = bch2_opts_default; | |
572 | bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb)); | |
573 | bch2_opts_apply(&c->opts, opts); | |
574 | ||
575 | c->block_bits = ilog2(c->opts.block_size); | |
576 | c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c); | |
577 | ||
578 | c->opts.nochanges |= c->opts.noreplay; | |
579 | c->opts.read_only |= c->opts.nochanges; | |
580 | ||
581 | if (bch2_fs_init_fault("fs_alloc")) | |
582 | goto err; | |
583 | ||
584 | iter_size = sizeof(struct btree_node_iter_large) + | |
585 | (btree_blocks(c) + 1) * 2 * | |
586 | sizeof(struct btree_node_iter_set); | |
587 | ||
588 | if (!(c->wq = alloc_workqueue("bcachefs", | |
589 | WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || | |
590 | !(c->copygc_wq = alloc_workqueue("bcache_copygc", | |
591 | WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || | |
592 | percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) || | |
593 | mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1, | |
594 | sizeof(struct btree_reserve)) || | |
595 | mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, | |
596 | sizeof(struct btree_update)) || | |
597 | mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || | |
598 | bioset_init(&c->btree_bio, 1, | |
599 | max(offsetof(struct btree_read_bio, bio), | |
600 | offsetof(struct btree_write_bio, wbio.bio)), | |
601 | BIOSET_NEED_BVECS) || | |
602 | !(c->usage_percpu = alloc_percpu(struct bch_fs_usage)) || | |
603 | percpu_init_rwsem(&c->usage_lock) || | |
604 | mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1, | |
605 | btree_bytes(c)) || | |
581edb63 KO |
606 | mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, |
607 | sizeof(struct btree_iter) * BTREE_ITER_MAX) || | |
1c6fdbd8 KO |
608 | bch2_io_clock_init(&c->io_clock[READ]) || |
609 | bch2_io_clock_init(&c->io_clock[WRITE]) || | |
610 | bch2_fs_journal_init(&c->journal) || | |
611 | bch2_fs_btree_cache_init(c) || | |
612 | bch2_fs_io_init(c) || | |
613 | bch2_fs_encryption_init(c) || | |
614 | bch2_fs_compress_init(c) || | |
615 | bch2_fs_fsio_init(c)) | |
616 | goto err; | |
617 | ||
618 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
619 | for (i = 0; i < c->sb.nr_devices; i++) | |
620 | if (bch2_dev_exists(c->disk_sb.sb, mi, i) && | |
621 | bch2_dev_alloc(c, i)) | |
622 | goto err; | |
623 | ||
624 | /* | |
625 | * Now that all allocations have succeeded, init various refcounty | |
626 | * things that let us shutdown: | |
627 | */ | |
628 | closure_init(&c->cl, NULL); | |
629 | ||
630 | c->kobj.kset = bcachefs_kset; | |
631 | kobject_init(&c->kobj, &bch2_fs_ktype); | |
632 | kobject_init(&c->internal, &bch2_fs_internal_ktype); | |
633 | kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype); | |
634 | kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype); | |
635 | ||
636 | mutex_lock(&bch_fs_list_lock); | |
637 | err = bch2_fs_online(c); | |
638 | mutex_unlock(&bch_fs_list_lock); | |
639 | if (err) { | |
640 | bch_err(c, "bch2_fs_online() error: %s", err); | |
641 | goto err; | |
642 | } | |
643 | out: | |
644 | pr_verbose_init(opts, "ret %i", c ? 0 : -ENOMEM); | |
645 | return c; | |
646 | err: | |
647 | bch2_fs_free(c); | |
648 | c = NULL; | |
649 | goto out; | |
650 | } | |
651 | ||
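/*
 * Bring an allocated filesystem up: stamp the members' last_mount times,
 * run recovery (or initialize a fresh filesystem if the superblock isn't
 * marked initialized), then go read-write unless the read_only option is
 * set. Returns NULL on success or an error string mapped from the
 * fsck/recovery return code.
 */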
652 | const char *bch2_fs_start(struct bch_fs *c) | |
653 | { | |
654 | const char *err = "cannot allocate memory"; | |
655 | struct bch_sb_field_members *mi; | |
656 | struct bch_dev *ca; | |
657 | time64_t now = ktime_get_seconds(); | |
658 | unsigned i; | |
659 | int ret = -EINVAL; | |
660 | ||
661 | mutex_lock(&c->state_lock); | |
662 | ||
663 | BUG_ON(c->state != BCH_FS_STARTING); | |
664 | ||
665 | mutex_lock(&c->sb_lock); | |
666 | ||
667 | for_each_online_member(ca, c, i) | |
668 | bch2_sb_from_fs(c, ca); | |
669 | ||
670 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
671 | for_each_online_member(ca, c, i) | |
672 | mi->members[ca->dev_idx].last_mount = cpu_to_le64(now); | |
673 | ||
674 | mutex_unlock(&c->sb_lock); | |
675 | ||
676 | for_each_rw_member(ca, c, i) | |
677 | bch2_dev_allocator_add(c, ca); | |
678 | bch2_recalc_capacity(c); | |
679 | ||
680 | ret = BCH_SB_INITIALIZED(c->disk_sb.sb) | |
681 | ? bch2_fs_recovery(c) | |
682 | : bch2_fs_initialize(c); | |
683 | if (ret) | |
684 | goto err; | |
685 | ||
686 | err = "dynamic fault"; | |
687 | if (bch2_fs_init_fault("fs_start")) | |
688 | goto err; | |
689 | ||
690 | if (c->opts.read_only) { | |
691 | bch2_fs_read_only(c); | |
692 | } else { | |
693 | err = bch2_fs_read_write(c); | |
694 | if (err) | |
695 | goto err; | |
696 | } | |
697 | ||
698 | set_bit(BCH_FS_STARTED, &c->flags); | |
699 | ||
700 | err = NULL; | |
701 | out: | |
702 | mutex_unlock(&c->state_lock); | |
703 | return err; | |
704 | err: | |
705 | switch (ret) { | |
706 | case BCH_FSCK_ERRORS_NOT_FIXED: | |
707 | bch_err(c, "filesystem contains errors: please report this to the developers"); | |
708 | pr_cont("mount with -o fix_errors to repair\n"); | |
709 | err = "fsck error"; | |
710 | break; | |
711 | case BCH_FSCK_REPAIR_UNIMPLEMENTED: | |
712 | bch_err(c, "filesystem contains errors: please report this to the developers"); | |
713 | pr_cont("repair unimplemented: inform the developers so that it can be added\n"); | |
714 | err = "fsck error"; | |
715 | break; | |
716 | case BCH_FSCK_REPAIR_IMPOSSIBLE: | |
717 | bch_err(c, "filesystem contains errors, but repair impossible"); | |
718 | err = "fsck error"; | |
719 | break; | |
720 | case BCH_FSCK_UNKNOWN_VERSION: | |
721 | err = "unknown metadata version";; | |
722 | break; | |
723 | case -ENOMEM: | |
724 | err = "cannot allocate memory"; | |
725 | break; | |
726 | case -EIO: | |
727 | err = "IO error"; | |
728 | break; | |
729 | } | |
730 | ||
731 | BUG_ON(!err); | |
732 | set_bit(BCH_FS_ERROR, &c->flags); | |
733 | goto out; | |
734 | } | |
735 | ||
736 | static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c) | |
737 | { | |
738 | struct bch_sb_field_members *sb_mi; | |
739 | ||
740 | sb_mi = bch2_sb_get_members(sb); | |
741 | if (!sb_mi) | |
742 | return "Invalid superblock: member info area missing"; | |
743 | ||
744 | if (le16_to_cpu(sb->block_size) != c->opts.block_size) | |
745 | return "mismatched block size"; | |
746 | ||
747 | if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) < | |
748 | BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb)) | |
749 | return "new cache bucket size is too small"; | |
750 | ||
751 | return NULL; | |
752 | } | |
753 | ||
754 | static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) | |
755 | { | |
756 | struct bch_sb *newest = | |
757 | le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb; | |
758 | struct bch_sb_field_members *mi = bch2_sb_get_members(newest); | |
759 | ||
760 | if (!uuid_equal(&fs->uuid, &sb->uuid)) | |
761 | return "device not a member of filesystem"; | |
762 | ||
763 | if (!bch2_dev_exists(newest, mi, sb->dev_idx)) | |
764 | return "device has been removed"; | |
765 | ||
766 | if (fs->block_size != sb->block_size) | |
767 | return "mismatched block size"; | |
768 | ||
769 | return NULL; | |
770 | } | |
771 | ||
772 | /* Device startup/shutdown: */ | |
773 | ||
774 | static void bch2_dev_release(struct kobject *kobj) | |
775 | { | |
776 | struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); | |
777 | ||
778 | kfree(ca); | |
779 | } | |
780 | ||
781 | static void bch2_dev_free(struct bch_dev *ca) | |
782 | { | |
783 | cancel_work_sync(&ca->io_error_work); | |
784 | ||
785 | if (ca->kobj.state_in_sysfs && | |
786 | ca->disk_sb.bdev) | |
787 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
788 | ||
789 | if (ca->kobj.state_in_sysfs) | |
790 | kobject_del(&ca->kobj); | |
791 | ||
792 | bch2_free_super(&ca->disk_sb); | |
793 | bch2_dev_journal_exit(ca); | |
794 | ||
795 | free_percpu(ca->io_done); | |
796 | bioset_exit(&ca->replica_set); | |
797 | bch2_dev_buckets_free(ca); | |
798 | ||
799 | bch2_time_stats_exit(&ca->io_latency[WRITE]); | |
800 | bch2_time_stats_exit(&ca->io_latency[READ]); | |
801 | ||
802 | percpu_ref_exit(&ca->io_ref); | |
803 | percpu_ref_exit(&ca->ref); | |
804 | kobject_put(&ca->kobj); | |
805 | } | |
806 | ||
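/*
 * Take a member device offline: set it read-only, kill and drain its
 * io_ref so no new IO can start, remove its sysfs links, then release
 * its superblock and journal resources.
 */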
807 | static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca) | |
808 | { | |
809 | ||
810 | lockdep_assert_held(&c->state_lock); | |
811 | ||
812 | if (percpu_ref_is_zero(&ca->io_ref)) | |
813 | return; | |
814 | ||
815 | __bch2_dev_read_only(c, ca); | |
816 | ||
817 | reinit_completion(&ca->io_ref_completion); | |
818 | percpu_ref_kill(&ca->io_ref); | |
819 | wait_for_completion(&ca->io_ref_completion); | |
820 | ||
821 | if (ca->kobj.state_in_sysfs) { | |
822 | sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs"); | |
823 | sysfs_remove_link(&ca->kobj, "block"); | |
824 | } | |
825 | ||
826 | bch2_free_super(&ca->disk_sb); | |
827 | bch2_dev_journal_exit(ca); | |
828 | } | |
829 | ||
830 | static void bch2_dev_ref_complete(struct percpu_ref *ref) | |
831 | { | |
832 | struct bch_dev *ca = container_of(ref, struct bch_dev, ref); | |
833 | ||
834 | complete(&ca->ref_completion); | |
835 | } | |
836 | ||
837 | static void bch2_dev_io_ref_complete(struct percpu_ref *ref) | |
838 | { | |
839 | struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref); | |
840 | ||
841 | complete(&ca->io_ref_completion); | |
842 | } | |
843 | ||
844 | static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca) | |
845 | { | |
846 | int ret; | |
847 | ||
848 | if (!c->kobj.state_in_sysfs) | |
849 | return 0; | |
850 | ||
851 | if (!ca->kobj.state_in_sysfs) { | |
852 | ret = kobject_add(&ca->kobj, &c->kobj, | |
853 | "dev-%u", ca->dev_idx); | |
854 | if (ret) | |
855 | return ret; | |
856 | } | |
857 | ||
858 | if (ca->disk_sb.bdev) { | |
859 | struct kobject *block = bdev_kobj(ca->disk_sb.bdev); | |
860 | ||
861 | ret = sysfs_create_link(block, &ca->kobj, "bcachefs"); | |
862 | if (ret) | |
863 | return ret; | |
864 | ||
865 | ret = sysfs_create_link(&ca->kobj, block, "block"); | |
866 | if (ret) | |
867 | return ret; | |
868 | } | |
869 | ||
870 | return 0; | |
871 | } | |
872 | ||
873 | static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, | |
874 | struct bch_member *member) | |
875 | { | |
876 | struct bch_dev *ca; | |
877 | ||
878 | ca = kzalloc(sizeof(*ca), GFP_KERNEL); | |
879 | if (!ca) | |
880 | return NULL; | |
881 | ||
882 | kobject_init(&ca->kobj, &bch2_dev_ktype); | |
883 | init_completion(&ca->ref_completion); | |
884 | init_completion(&ca->io_ref_completion); | |
885 | ||
886 | init_rwsem(&ca->bucket_lock); | |
887 | ||
888 | writepoint_init(&ca->copygc_write_point, BCH_DATA_USER); | |
889 | ||
890 | spin_lock_init(&ca->freelist_lock); | |
891 | bch2_dev_copygc_init(ca); | |
892 | ||
893 | INIT_WORK(&ca->io_error_work, bch2_io_error_work); | |
894 | ||
895 | bch2_time_stats_init(&ca->io_latency[READ]); | |
896 | bch2_time_stats_init(&ca->io_latency[WRITE]); | |
897 | ||
898 | ca->mi = bch2_mi_to_cpu(member); | |
899 | ca->uuid = member->uuid; | |
900 | ||
901 | if (opt_defined(c->opts, discard)) | |
902 | ca->mi.discard = opt_get(c->opts, discard); | |
903 | ||
904 | if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete, | |
905 | 0, GFP_KERNEL) || | |
906 | percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete, | |
907 | PERCPU_REF_INIT_DEAD, GFP_KERNEL) || | |
908 | bch2_dev_buckets_alloc(c, ca) || | |
909 | bioset_init(&ca->replica_set, 4, | |
910 | offsetof(struct bch_write_bio, bio), 0) || | |
911 | !(ca->io_done = alloc_percpu(*ca->io_done))) | |
912 | goto err; | |
913 | ||
914 | return ca; | |
915 | err: | |
916 | bch2_dev_free(ca); | |
917 | return NULL; | |
918 | } | |
919 | ||
920 | static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca, | |
921 | unsigned dev_idx) | |
922 | { | |
923 | ca->dev_idx = dev_idx; | |
924 | __set_bit(ca->dev_idx, ca->self.d); | |
925 | scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx); | |
926 | ||
927 | ca->fs = c; | |
928 | rcu_assign_pointer(c->devs[ca->dev_idx], ca); | |
929 | ||
930 | if (bch2_dev_sysfs_online(c, ca)) | |
931 | pr_warn("error creating sysfs objects"); | |
932 | } | |
933 | ||
934 | static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) | |
935 | { | |
936 | struct bch_member *member = | |
937 | bch2_sb_get_members(c->disk_sb.sb)->members + dev_idx; | |
938 | struct bch_dev *ca = NULL; | |
939 | int ret = 0; | |
940 | ||
941 | pr_verbose_init(c->opts, ""); | |
942 | ||
943 | if (bch2_fs_init_fault("dev_alloc")) | |
944 | goto err; | |
945 | ||
946 | ca = __bch2_dev_alloc(c, member); | |
947 | if (!ca) | |
948 | goto err; | |
949 | ||
950 | bch2_dev_attach(c, ca, dev_idx); | |
951 | out: | |
952 | pr_verbose_init(c->opts, "ret %i", ret); | |
953 | return ret; | |
954 | err: | |
955 | if (ca) | |
956 | bch2_dev_free(ca); | |
957 | ret = -ENOMEM; | |
958 | goto out; | |
959 | } | |
960 | ||
961 | static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) | |
962 | { | |
963 | unsigned ret; | |
964 | ||
965 | if (bch2_dev_is_online(ca)) { | |
966 | bch_err(ca, "already have device online in slot %u", | |
967 | sb->sb->dev_idx); | |
968 | return -EINVAL; | |
969 | } | |
970 | ||
971 | if (get_capacity(sb->bdev->bd_disk) < | |
972 | ca->mi.bucket_size * ca->mi.nbuckets) { | |
973 | bch_err(ca, "cannot online: device too small"); | |
974 | return -EINVAL; | |
975 | } | |
976 | ||
977 | BUG_ON(!percpu_ref_is_zero(&ca->io_ref)); | |
978 | ||
979 | if (get_capacity(sb->bdev->bd_disk) < | |
980 | ca->mi.bucket_size * ca->mi.nbuckets) { | |
981 | bch_err(ca, "device too small"); | |
982 | return -EINVAL; | |
983 | } | |
984 | ||
985 | ret = bch2_dev_journal_init(ca, sb->sb); | |
986 | if (ret) | |
987 | return ret; | |
988 | ||
989 | /* Commit: */ | |
990 | ca->disk_sb = *sb; | |
991 | memset(sb, 0, sizeof(*sb)); | |
992 | ||
1c6fdbd8 KO |
993 | percpu_ref_reinit(&ca->io_ref); |
994 | ||
995 | return 0; | |
996 | } | |
997 | ||
998 | static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb) | |
999 | { | |
1000 | struct bch_dev *ca; | |
1001 | int ret; | |
1002 | ||
1003 | lockdep_assert_held(&c->state_lock); | |
1004 | ||
1005 | if (le64_to_cpu(sb->sb->seq) > | |
1006 | le64_to_cpu(c->disk_sb.sb->seq)) | |
1007 | bch2_sb_to_fs(c, sb->sb); | |
1008 | ||
1009 | BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices || | |
1010 | !c->devs[sb->sb->dev_idx]); | |
1011 | ||
1012 | ca = bch_dev_locked(c, sb->sb->dev_idx); | |
1013 | ||
1014 | ret = __bch2_dev_attach_bdev(ca, sb); | |
1015 | if (ret) | |
1016 | return ret; | |
1017 | ||
6eac2c2e KO |
1018 | mutex_lock(&c->sb_lock); |
1019 | bch2_mark_dev_superblock(ca->fs, ca, | |
1020 | BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE); | |
1021 | mutex_unlock(&c->sb_lock); | |
1022 | ||
1c6fdbd8 KO |
1023 | bch2_dev_sysfs_online(c, ca); |
1024 | ||
1025 | if (c->sb.nr_devices == 1) | |
1026 | snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev); | |
1027 | snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev); | |
1028 | ||
1029 | rebalance_wakeup(c); | |
1030 | return 0; | |
1031 | } | |
1032 | ||
1033 | /* Device management: */ | |
1034 | ||
1035 | /* | |
1036 | * Note: this function is also used by the error paths - when a particular | |
1037 | * device sees an error, we call it to determine whether we can just set the | |
1038 | * device RO, or - if this function returns false - we'll set the whole | |
1039 | * filesystem RO: | |
1040 | * | |
1041 | * XXX: maybe we should be more explicit about whether we're changing state | |
1042 | * because we got an error or what have you? | |
1043 | */ | |
1044 | bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, | |
1045 | enum bch_member_state new_state, int flags) | |
1046 | { | |
1047 | struct bch_devs_mask new_online_devs; | |
1048 | struct replicas_status s; | |
1049 | struct bch_dev *ca2; | |
1050 | int i, nr_rw = 0, required; | |
1051 | ||
1052 | lockdep_assert_held(&c->state_lock); | |
1053 | ||
1054 | switch (new_state) { | |
1055 | case BCH_MEMBER_STATE_RW: | |
1056 | return true; | |
1057 | case BCH_MEMBER_STATE_RO: | |
1058 | if (ca->mi.state != BCH_MEMBER_STATE_RW) | |
1059 | return true; | |
1060 | ||
1061 | /* do we have enough devices to write to? */ | |
1062 | for_each_member_device(ca2, c, i) | |
1063 | if (ca2 != ca) | |
1064 | nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW; | |
1065 | ||
1066 | required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED) | |
1067 | ? c->opts.metadata_replicas | |
1068 | : c->opts.metadata_replicas_required, | |
1069 | !(flags & BCH_FORCE_IF_DATA_DEGRADED) | |
1070 | ? c->opts.data_replicas | |
1071 | : c->opts.data_replicas_required); | |
1072 | ||
1073 | return nr_rw >= required; | |
1074 | case BCH_MEMBER_STATE_FAILED: | |
1075 | case BCH_MEMBER_STATE_SPARE: | |
1076 | if (ca->mi.state != BCH_MEMBER_STATE_RW && | |
1077 | ca->mi.state != BCH_MEMBER_STATE_RO) | |
1078 | return true; | |
1079 | ||
1080 | /* do we have enough devices to read from? */ | |
1081 | new_online_devs = bch2_online_devs(c); | |
1082 | __clear_bit(ca->dev_idx, new_online_devs.d); | |
1083 | ||
1084 | s = __bch2_replicas_status(c, new_online_devs); | |
1085 | ||
1086 | return bch2_have_enough_devs(s, flags); | |
1087 | default: | |
1088 | BUG(); | |
1089 | } | |
1090 | } | |
1091 | ||
1092 | static bool bch2_fs_may_start(struct bch_fs *c) | |
1093 | { | |
1094 | struct replicas_status s; | |
1095 | struct bch_sb_field_members *mi; | |
1096 | struct bch_dev *ca; | |
1097 | unsigned i, flags = c->opts.degraded | |
1098 | ? BCH_FORCE_IF_DEGRADED | |
1099 | : 0; | |
1100 | ||
1101 | if (!c->opts.degraded) { | |
1102 | mutex_lock(&c->sb_lock); | |
1103 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1104 | ||
1105 | for (i = 0; i < c->disk_sb.sb->nr_devices; i++) { | |
1106 | if (!bch2_dev_exists(c->disk_sb.sb, mi, i)) | |
1107 | continue; | |
1108 | ||
1109 | ca = bch_dev_locked(c, i); | |
1110 | ||
1111 | if (!bch2_dev_is_online(ca) && | |
1112 | (ca->mi.state == BCH_MEMBER_STATE_RW || | |
1113 | ca->mi.state == BCH_MEMBER_STATE_RO)) { | |
1114 | mutex_unlock(&c->sb_lock); | |
1115 | return false; | |
1116 | } | |
1117 | } | |
1118 | mutex_unlock(&c->sb_lock); | |
1119 | } | |
1120 | ||
1121 | s = bch2_replicas_status(c); | |
1122 | ||
1123 | return bch2_have_enough_devs(s, flags); | |
1124 | } | |
1125 | ||
1126 | static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca) | |
1127 | { | |
1128 | bch2_copygc_stop(ca); | |
1129 | ||
1130 | /* | |
1131 | * The allocator thread itself allocates btree nodes, so stop it first: | |
1132 | */ | |
1133 | bch2_dev_allocator_stop(ca); | |
1134 | bch2_dev_allocator_remove(c, ca); | |
1135 | bch2_dev_journal_stop(&c->journal, ca); | |
1136 | } | |
1137 | ||
1138 | static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) | |
1139 | { | |
1140 | lockdep_assert_held(&c->state_lock); | |
1141 | ||
1142 | BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW); | |
1143 | ||
1144 | bch2_dev_allocator_add(c, ca); | |
1145 | bch2_recalc_capacity(c); | |
1146 | ||
1147 | if (bch2_dev_allocator_start(ca)) | |
1148 | return "error starting allocator thread"; | |
1149 | ||
1150 | if (bch2_copygc_start(c, ca)) | |
1151 | return "error starting copygc thread"; | |
1152 | ||
1153 | return NULL; | |
1154 | } | |
1155 | ||
1156 | int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, | |
1157 | enum bch_member_state new_state, int flags) | |
1158 | { | |
1159 | struct bch_sb_field_members *mi; | |
1160 | int ret = 0; | |
1161 | ||
1162 | if (ca->mi.state == new_state) | |
1163 | return 0; | |
1164 | ||
1165 | if (!bch2_dev_state_allowed(c, ca, new_state, flags)) | |
1166 | return -EINVAL; | |
1167 | ||
1168 | if (new_state != BCH_MEMBER_STATE_RW) | |
1169 | __bch2_dev_read_only(c, ca); | |
1170 | ||
1171 | bch_notice(ca, "%s", bch2_dev_state[new_state]); | |
1172 | ||
1173 | mutex_lock(&c->sb_lock); | |
1174 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1175 | SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state); | |
1176 | bch2_write_super(c); | |
1177 | mutex_unlock(&c->sb_lock); | |
1178 | ||
1179 | if (new_state == BCH_MEMBER_STATE_RW && | |
1180 | __bch2_dev_read_write(c, ca)) | |
1181 | ret = -ENOMEM; | |
1182 | ||
1183 | rebalance_wakeup(c); | |
1184 | ||
1185 | return ret; | |
1186 | } | |
1187 | ||
1188 | int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, | |
1189 | enum bch_member_state new_state, int flags) | |
1190 | { | |
1191 | int ret; | |
1192 | ||
1193 | mutex_lock(&c->state_lock); | |
1194 | ret = __bch2_dev_set_state(c, ca, new_state, flags); | |
1195 | mutex_unlock(&c->state_lock); | |
1196 | ||
1197 | return ret; | |
1198 | } | |
1199 | ||
1200 | /* Device add/removal: */ | |
1201 | ||
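/*
 * Removing a device: set it read-only, migrate or drop its data, flush
 * journal pins that reference it, delete its alloc btree keys, take it
 * offline, and only then zero its slot in the superblock member array so
 * the index can be reused.
 */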
1202 | int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) | |
1203 | { | |
1204 | struct bch_sb_field_members *mi; | |
1205 | unsigned dev_idx = ca->dev_idx, data; | |
1206 | int ret = -EINVAL; | |
1207 | ||
1208 | mutex_lock(&c->state_lock); | |
1209 | ||
1210 | percpu_ref_put(&ca->ref); /* XXX */ | |
1211 | ||
1212 | if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { | |
1213 | bch_err(ca, "Cannot remove without losing data"); | |
1214 | goto err; | |
1215 | } | |
1216 | ||
1217 | __bch2_dev_read_only(c, ca); | |
1218 | ||
1219 | /* | |
1220 | * XXX: verify that dev_idx is really not in use anymore, anywhere | |
1221 | * | |
1222 | * flag_data_bad() does not check btree pointers | |
1223 | */ | |
1224 | ret = bch2_dev_data_drop(c, ca->dev_idx, flags); | |
1225 | if (ret) { | |
1226 | bch_err(ca, "Remove failed: error %i dropping data", ret); | |
1227 | goto err; | |
1228 | } | |
1229 | ||
1230 | ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx); | |
1231 | if (ret) { | |
1232 | bch_err(ca, "Remove failed: error %i flushing journal", ret); | |
1233 | goto err; | |
1234 | } | |
1235 | ||
1236 | data = bch2_dev_has_data(c, ca); | |
1237 | if (data) { | |
1238 | char data_has_str[100]; | |
1239 | bch2_scnprint_flag_list(data_has_str, | |
1240 | sizeof(data_has_str), | |
1241 | bch2_data_types, | |
1242 | data); | |
1243 | bch_err(ca, "Remove failed, still has data (%s)", data_has_str); | |
1244 | ret = -EBUSY; | |
1245 | goto err; | |
1246 | } | |
1247 | ||
1248 | ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC, | |
1249 | POS(ca->dev_idx, 0), | |
1250 | POS(ca->dev_idx + 1, 0), | |
fc3268c1 |
1251 | NULL); |
1c6fdbd8 KO |
1252 | if (ret) { |
1253 | bch_err(ca, "Remove failed, error deleting alloc info"); | |
1254 | goto err; | |
1255 | } | |
1256 | ||
1257 | /* | |
1258 | * must flush all existing journal entries, they might have | |
1259 | * (overwritten) keys that point to the device we're removing: | |
1260 | */ | |
1261 | bch2_journal_flush_all_pins(&c->journal); | |
1262 | ret = bch2_journal_error(&c->journal); | |
1263 | if (ret) { | |
1264 | bch_err(ca, "Remove failed, journal error"); | |
1265 | goto err; | |
1266 | } | |
1267 | ||
1268 | __bch2_dev_offline(c, ca); | |
1269 | ||
1270 | mutex_lock(&c->sb_lock); | |
1271 | rcu_assign_pointer(c->devs[ca->dev_idx], NULL); | |
1272 | mutex_unlock(&c->sb_lock); | |
1273 | ||
1274 | percpu_ref_kill(&ca->ref); | |
1275 | wait_for_completion(&ca->ref_completion); | |
1276 | ||
1277 | bch2_dev_free(ca); | |
1278 | ||
1279 | /* | |
1280 | * Free this device's slot in the bch_member array - all pointers to | |
1281 | * this device must be gone: | |
1282 | */ | |
1283 | mutex_lock(&c->sb_lock); | |
1284 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1285 | memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid)); | |
1286 | ||
1287 | bch2_write_super(c); | |
1288 | ||
1289 | mutex_unlock(&c->sb_lock); | |
1290 | mutex_unlock(&c->state_lock); | |
1291 | return 0; | |
1292 | err: | |
1293 | if (ca->mi.state == BCH_MEMBER_STATE_RW) | |
1294 | __bch2_dev_read_write(c, ca); | |
1295 | mutex_unlock(&c->state_lock); | |
1296 | return ret; | |
1297 | } | |
1298 | ||
6eac2c2e KO |
1299 | static void dev_usage_clear(struct bch_dev *ca) |
1300 | { | |
1301 | struct bucket_array *buckets; | |
1302 | int cpu; | |
1303 | ||
1304 | for_each_possible_cpu(cpu) { | |
1305 | struct bch_dev_usage *p = | |
1306 | per_cpu_ptr(ca->usage_percpu, cpu); | |
1307 | memset(p, 0, sizeof(*p)); | |
1308 | } | |
1309 | ||
1310 | down_read(&ca->bucket_lock); | |
1311 | buckets = bucket_array(ca); | |
1312 | ||
1313 | memset(buckets->b, 0, sizeof(buckets->b[0]) * buckets->nbuckets); | |
1314 | up_read(&ca->bucket_lock); | |
1315 | } | |
1316 | ||
1c6fdbd8 KO |
1317 | /* Add new device to running filesystem: */ |
1318 | int bch2_dev_add(struct bch_fs *c, const char *path) | |
1319 | { | |
1320 | struct bch_opts opts = bch2_opts_empty(); | |
1321 | struct bch_sb_handle sb; | |
1322 | const char *err; | |
1323 | struct bch_dev *ca = NULL; | |
1324 | struct bch_sb_field_members *mi; | |
1325 | struct bch_member dev_mi; | |
1326 | unsigned dev_idx, nr_devices, u64s; | |
1327 | int ret; | |
1328 | ||
1329 | ret = bch2_read_super(path, &opts, &sb); | |
1330 | if (ret) | |
1331 | return ret; | |
1332 | ||
1333 | err = bch2_sb_validate(&sb); | |
1334 | if (err) | |
1335 | return -EINVAL; | |
1336 | ||
1337 | dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx]; | |
1338 | ||
1339 | err = bch2_dev_may_add(sb.sb, c); | |
1340 | if (err) | |
1341 | return -EINVAL; | |
1342 | ||
1343 | ca = __bch2_dev_alloc(c, &dev_mi); | |
1344 | if (!ca) { | |
1345 | bch2_free_super(&sb); | |
1346 | return -ENOMEM; | |
1347 | } | |
1348 | ||
1349 | ret = __bch2_dev_attach_bdev(ca, &sb); | |
1350 | if (ret) { | |
1351 | bch2_dev_free(ca); | |
1352 | return ret; | |
1353 | } | |
1354 | ||
6eac2c2e KO |
1355 | /* |
1356 | * We want to allocate the journal on the new device before adding the |
1357 | * new device to the filesystem, because allocating after we attach |
1358 | * requires spinning up the allocator thread, and the allocator thread |
1359 | * requires doing btree writes, which won't work if the existing |
1360 | * devices are RO. |
1361 | * | |
1362 | * So we have to mark where the superblocks are, but marking allocated | |
1363 | * data normally updates the filesystem usage too, so we have to mark, | |
1364 | * allocate the journal, reset all the marks, then remark after we | |
1365 | * attach... | |
1366 | */ | |
1367 | bch2_mark_dev_superblock(ca->fs, ca, | |
1368 | BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE); | |
1369 | ||
1c6fdbd8 KO |
1370 | err = "journal alloc failed"; |
1371 | ret = bch2_dev_journal_alloc(ca); | |
1372 | if (ret) | |
1373 | goto err; | |
1374 | ||
6eac2c2e KO |
1375 | dev_usage_clear(ca); |
1376 | ||
1c6fdbd8 KO |
1377 | mutex_lock(&c->state_lock); |
1378 | mutex_lock(&c->sb_lock); | |
1379 | ||
1380 | err = "insufficient space in new superblock"; | |
1381 | ret = bch2_sb_from_fs(c, ca); | |
1382 | if (ret) | |
1383 | goto err_unlock; | |
1384 | ||
1385 | mi = bch2_sb_get_members(ca->disk_sb.sb); | |
1386 | ||
1387 | if (!bch2_sb_resize_members(&ca->disk_sb, | |
1388 | le32_to_cpu(mi->field.u64s) + | |
1389 | sizeof(dev_mi) / sizeof(u64))) { | |
1390 | ret = -ENOSPC; | |
1391 | goto err_unlock; | |
1392 | } | |
1393 | ||
1394 | if (dynamic_fault("bcachefs:add:no_slot")) | |
1395 | goto no_slot; | |
1396 | ||
1397 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1398 | for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) | |
1399 | if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx)) | |
1400 | goto have_slot; | |
1401 | no_slot: | |
1402 | err = "no slots available in superblock"; | |
1403 | ret = -ENOSPC; | |
1404 | goto err_unlock; | |
1405 | ||
1406 | have_slot: | |
1407 | nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices); | |
1408 | u64s = (sizeof(struct bch_sb_field_members) + | |
1409 | sizeof(struct bch_member) * nr_devices) / sizeof(u64); | |
1410 | ||
1411 | err = "no space in superblock for member info"; | |
1412 | ret = -ENOSPC; | |
1413 | ||
1414 | mi = bch2_sb_resize_members(&c->disk_sb, u64s); | |
1415 | if (!mi) | |
1416 | goto err_unlock; | |
1417 | ||
1418 | /* success: */ | |
1419 | ||
1420 | mi->members[dev_idx] = dev_mi; | |
1421 | mi->members[dev_idx].last_mount = cpu_to_le64(ktime_get_seconds()); | |
1422 | c->disk_sb.sb->nr_devices = nr_devices; | |
1423 | ||
1424 | ca->disk_sb.sb->dev_idx = dev_idx; | |
1425 | bch2_dev_attach(c, ca, dev_idx); | |
1426 | ||
6eac2c2e KO |
1427 | bch2_mark_dev_superblock(c, ca, |
1428 | BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE); | |
1429 | ||
1c6fdbd8 KO |
1430 | bch2_write_super(c); |
1431 | mutex_unlock(&c->sb_lock); | |
1432 | ||
1433 | if (ca->mi.state == BCH_MEMBER_STATE_RW) { | |
1434 | err = __bch2_dev_read_write(c, ca); | |
1435 | if (err) | |
1436 | goto err_late; | |
1437 | } | |
1438 | ||
1439 | mutex_unlock(&c->state_lock); | |
1440 | return 0; | |
1441 | ||
1442 | err_unlock: | |
1443 | mutex_unlock(&c->sb_lock); | |
1444 | mutex_unlock(&c->state_lock); | |
1445 | err: | |
1446 | if (ca) | |
1447 | bch2_dev_free(ca); | |
1448 | bch2_free_super(&sb); | |
1449 | bch_err(c, "Unable to add device: %s", err); | |
1450 | return ret; | |
1451 | err_late: | |
1452 | bch_err(c, "Error going rw after adding device: %s", err); | |
1453 | return -EINVAL; | |
1454 | } | |
1455 | ||
1456 | /* Hot add existing device to running filesystem: */ | |
1457 | int bch2_dev_online(struct bch_fs *c, const char *path) | |
1458 | { | |
1459 | struct bch_opts opts = bch2_opts_empty(); | |
1460 | struct bch_sb_handle sb = { NULL }; | |
1461 | struct bch_sb_field_members *mi; | |
1462 | struct bch_dev *ca; | |
1463 | unsigned dev_idx; | |
1464 | const char *err; | |
1465 | int ret; | |
1466 | ||
1467 | mutex_lock(&c->state_lock); | |
1468 | ||
1469 | ret = bch2_read_super(path, &opts, &sb); | |
1470 | if (ret) { | |
1471 | mutex_unlock(&c->state_lock); | |
1472 | return ret; | |
1473 | } | |
1474 | ||
1475 | dev_idx = sb.sb->dev_idx; | |
1476 | ||
1477 | err = bch2_dev_in_fs(c->disk_sb.sb, sb.sb); | |
1478 | if (err) | |
1479 | goto err; | |
1480 | ||
1481 | if (bch2_dev_attach_bdev(c, &sb)) { | |
1482 | err = "bch2_dev_attach_bdev() error"; | |
1483 | goto err; | |
1484 | } | |
1485 | ||
1486 | ca = bch_dev_locked(c, dev_idx); | |
1487 | if (ca->mi.state == BCH_MEMBER_STATE_RW) { | |
1488 | err = __bch2_dev_read_write(c, ca); | |
1489 | if (err) | |
1490 | goto err; | |
1491 | } | |
1492 | ||
1493 | mutex_lock(&c->sb_lock); | |
1494 | mi = bch2_sb_get_members(c->disk_sb.sb); | |
1495 | ||
1496 | mi->members[ca->dev_idx].last_mount = | |
1497 | cpu_to_le64(ktime_get_seconds()); | |
1498 | ||
1499 | bch2_write_super(c); | |
1500 | mutex_unlock(&c->sb_lock); | |
1501 | ||
1502 | mutex_unlock(&c->state_lock); | |
1503 | return 0; | |
1504 | err: | |
1505 | mutex_unlock(&c->state_lock); | |
1506 | bch2_free_super(&sb); | |
1507 | bch_err(c, "error bringing %s online: %s", path, err); | |
1508 | return -EINVAL; | |
1509 | } | |
1510 | ||
1511 | int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) | |
1512 | { | |
1513 | mutex_lock(&c->state_lock); | |
1514 | ||
1515 | if (!bch2_dev_is_online(ca)) { | |
1516 | bch_err(ca, "Already offline"); | |
1517 | mutex_unlock(&c->state_lock); | |
1518 | return 0; | |
1519 | } | |
1520 | ||
1521 | if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { | |
1522 | bch_err(ca, "Cannot offline required disk"); | |
1523 | mutex_unlock(&c->state_lock); | |
1524 | return -EINVAL; | |
1525 | } | |
1526 | ||
1527 | __bch2_dev_offline(c, ca); | |
1528 | ||
1529 | mutex_unlock(&c->state_lock); | |
1530 | return 0; | |
1531 | } | |
1532 | ||
1533 | int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) | |
1534 | { | |
1535 | struct bch_member *mi; | |
1536 | int ret = 0; | |
1537 | ||
1538 | mutex_lock(&c->state_lock); | |
1539 | ||
1540 | if (nbuckets < ca->mi.nbuckets) { | |
1541 | bch_err(ca, "Cannot shrink yet"); | |
1542 | ret = -EINVAL; | |
1543 | goto err; | |
1544 | } | |
1545 | ||
1546 | if (bch2_dev_is_online(ca) && | |
1547 | get_capacity(ca->disk_sb.bdev->bd_disk) < | |
1548 | ca->mi.bucket_size * nbuckets) { | |
1549 | bch_err(ca, "New size larger than device"); | |
1550 | ret = -EINVAL; | |
1551 | goto err; | |
1552 | } | |
1553 | ||
1554 | ret = bch2_dev_buckets_resize(c, ca, nbuckets); | |
1555 | if (ret) { | |
1556 | bch_err(ca, "Resize error: %i", ret); | |
1557 | goto err; | |
1558 | } | |
1559 | ||
1560 | mutex_lock(&c->sb_lock); | |
1561 | mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx]; | |
1562 | mi->nbuckets = cpu_to_le64(nbuckets); | |
1563 | ||
1564 | bch2_write_super(c); | |
1565 | mutex_unlock(&c->sb_lock); | |
1566 | ||
1567 | bch2_recalc_capacity(c); | |
1568 | err: | |
1569 | mutex_unlock(&c->state_lock); | |
1570 | return ret; | |
1571 | } | |
1572 | ||
1573 | /* return with ref on ca->ref: */ | |
1574 | struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path) | |
1575 | { | |
1576 | ||
1577 | struct bch_dev *ca; | |
1578 | dev_t dev; | |
1579 | unsigned i; | |
1580 | int ret; | |
1581 | ||
1582 | ret = lookup_bdev(path, &dev); | |
1583 | if (ret) | |
1584 | return ERR_PTR(ret); | |
1585 | ||
1586 | for_each_member_device(ca, c, i) | |
1587 | if (ca->disk_sb.bdev->bd_dev == dev) | |
1588 | goto found; | |
1589 | ||
1590 | ca = ERR_PTR(-ENOENT); | |
1591 | found: | |
1592 | return ca; | |
1593 | } | |
1594 | ||
1595 | /* Filesystem open: */ | |
1596 | ||
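/*
 * Open a filesystem from a set of device paths: read and validate every
 * superblock, treat the one with the highest seq as authoritative, check
 * that each device belongs to it, allocate the bch_fs, attach the block
 * devices, and start the filesystem unless opts.nostart is set.
 */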
1597 | struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices, | |
1598 | struct bch_opts opts) | |
1599 | { | |
1600 | struct bch_sb_handle *sb = NULL; | |
1601 | struct bch_fs *c = NULL; | |
1602 | unsigned i, best_sb = 0; | |
1603 | const char *err; | |
1604 | int ret = -ENOMEM; | |
1605 | ||
1606 | pr_verbose_init(opts, ""); | |
1607 | ||
1608 | if (!nr_devices) { | |
1609 | c = ERR_PTR(-EINVAL); | |
1610 | goto out2; | |
1611 | } | |
1612 | ||
1613 | if (!try_module_get(THIS_MODULE)) { | |
1614 | c = ERR_PTR(-ENODEV); | |
1615 | goto out2; | |
1616 | } | |
1617 | ||
1618 | sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL); | |
1619 | if (!sb) | |
1620 | goto err; | |
1621 | ||
1622 | for (i = 0; i < nr_devices; i++) { | |
1623 | ret = bch2_read_super(devices[i], &opts, &sb[i]); | |
1624 | if (ret) | |
1625 | goto err; | |
1626 | ||
1627 | err = bch2_sb_validate(&sb[i]); | |
1628 | if (err) | |
1629 | goto err_print; | |
1630 | } | |
1631 | ||
1632 | for (i = 1; i < nr_devices; i++) | |
1633 | if (le64_to_cpu(sb[i].sb->seq) > | |
1634 | le64_to_cpu(sb[best_sb].sb->seq)) | |
1635 | best_sb = i; | |
1636 | ||
1637 | for (i = 0; i < nr_devices; i++) { | |
1638 | err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb); | |
1639 | if (err) | |
1640 | goto err_print; | |
1641 | } | |
1642 | ||
1643 | ret = -ENOMEM; | |
1644 | c = bch2_fs_alloc(sb[best_sb].sb, opts); | |
1645 | if (!c) | |
1646 | goto err; | |
1647 | ||
1648 | err = "bch2_dev_online() error"; | |
1649 | mutex_lock(&c->state_lock); | |
1650 | for (i = 0; i < nr_devices; i++) | |
1651 | if (bch2_dev_attach_bdev(c, &sb[i])) { | |
1652 | mutex_unlock(&c->state_lock); | |
1653 | goto err_print; | |
1654 | } | |
1655 | mutex_unlock(&c->state_lock); | |
1656 | ||
1657 | err = "insufficient devices"; | |
1658 | if (!bch2_fs_may_start(c)) | |
1659 | goto err_print; | |
1660 | ||
1661 | if (!c->opts.nostart) { | |
1662 | err = bch2_fs_start(c); | |
1663 | if (err) | |
1664 | goto err_print; | |
1665 | } | |
1666 | out: | |
1667 | kfree(sb); | |
1668 | module_put(THIS_MODULE); | |
1669 | out2: | |
1670 | pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c)); | |
1671 | return c; | |
1672 | err_print: | |
1673 | pr_err("bch_fs_open err opening %s: %s", | |
1674 | devices[0], err); | |
1675 | ret = -EINVAL; | |
1676 | err: | |
1677 | if (c) | |
1678 | bch2_fs_stop(c); | |
1679 | for (i = 0; i < nr_devices; i++) | |
1680 | bch2_free_super(&sb[i]); | |
1681 | c = ERR_PTR(ret); | |
1682 | goto out; | |
1683 | } | |
1684 | ||
1685 | static const char *__bch2_fs_open_incremental(struct bch_sb_handle *sb, | |
1686 | struct bch_opts opts) | |
1687 | { | |
1688 | const char *err; | |
1689 | struct bch_fs *c; | |
1690 | bool allocated_fs = false; | |
1691 | ||
1692 | err = bch2_sb_validate(sb); | |
1693 | if (err) | |
1694 | return err; | |
1695 | ||
1696 | mutex_lock(&bch_fs_list_lock); | |
1697 | c = __bch2_uuid_to_fs(sb->sb->uuid); | |
1698 | if (c) { | |
1699 | closure_get(&c->cl); | |
1700 | ||
1701 | err = bch2_dev_in_fs(c->disk_sb.sb, sb->sb); | |
1702 | if (err) | |
1703 | goto err; | |
1704 | } else { | |
1705 | c = bch2_fs_alloc(sb->sb, opts); | |
1706 | err = "cannot allocate memory"; | |
1707 | if (!c) | |
1708 | goto err; | |
1709 | ||
1710 | allocated_fs = true; | |
1711 | } | |
1712 | ||
1713 | err = "bch2_dev_online() error"; | |
1714 | ||
1715 | mutex_lock(&c->sb_lock); | |
1716 | if (bch2_dev_attach_bdev(c, sb)) { | |
1717 | mutex_unlock(&c->sb_lock); | |
1718 | goto err; | |
1719 | } | |
1720 | mutex_unlock(&c->sb_lock); | |
1721 | ||
1722 | if (!c->opts.nostart && bch2_fs_may_start(c)) { | |
1723 | err = bch2_fs_start(c); | |
1724 | if (err) | |
1725 | goto err; | |
1726 | } | |
1727 | ||
1728 | closure_put(&c->cl); | |
1729 | mutex_unlock(&bch_fs_list_lock); | |
1730 | ||
1731 | return NULL; | |
1732 | err: | |
1733 | mutex_unlock(&bch_fs_list_lock); | |
1734 | ||
1735 | if (allocated_fs) | |
1736 | bch2_fs_stop(c); | |
1737 | else if (c) | |
1738 | closure_put(&c->cl); | |
1739 | ||
1740 | return err; | |
1741 | } | |
1742 | ||
1743 | const char *bch2_fs_open_incremental(const char *path) | |
1744 | { | |
1745 | struct bch_sb_handle sb; | |
1746 | struct bch_opts opts = bch2_opts_empty(); | |
1747 | const char *err; | |
1748 | ||
1749 | if (bch2_read_super(path, &opts, &sb)) | |
1750 | return "error reading superblock"; | |
1751 | ||
1752 | err = __bch2_fs_open_incremental(&sb, opts); | |
1753 | bch2_free_super(&sb); | |
1754 | ||
1755 | return err; | |
1756 | } | |
1757 | ||
1758 | /* Global interfaces/init */ | |
1759 | ||
1760 | static void bcachefs_exit(void) | |
1761 | { | |
1762 | bch2_debug_exit(); | |
1763 | bch2_vfs_exit(); | |
1764 | bch2_chardev_exit(); | |
1765 | if (bcachefs_kset) | |
1766 | kset_unregister(bcachefs_kset); | |
1767 | } | |
1768 | ||
1769 | static int __init bcachefs_init(void) | |
1770 | { | |
1771 | bch2_bkey_pack_test(); | |
1772 | bch2_inode_pack_test(); | |
1773 | ||
1774 | if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) || | |
1775 | bch2_chardev_init() || | |
1776 | bch2_vfs_init() || | |
1777 | bch2_debug_init()) | |
1778 | goto err; | |
1779 | ||
1780 | return 0; | |
1781 | err: | |
1782 | bcachefs_exit(); | |
1783 | return -ENOMEM; | |
1784 | } | |
1785 | ||
1786 | #define BCH_DEBUG_PARAM(name, description) \ | |
1787 | bool bch2_##name; \ | |
1788 | module_param_named(name, bch2_##name, bool, 0644); \ | |
1789 | MODULE_PARM_DESC(name, description); | |
1790 | BCH_DEBUG_PARAMS() | |
1791 | #undef BCH_DEBUG_PARAM | |
1792 | ||
1793 | unsigned bch2_metadata_version = BCH_SB_VERSION_MAX; | |
1794 | module_param_named(version, bch2_metadata_version, uint, 0400); | |
1795 | ||
1796 | module_exit(bcachefs_exit); | |
1797 | module_init(bcachefs_init); |