bcachefs: serialize persistent_reserved
[linux-block.git] / fs / bcachefs / recovery.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "alloc_background.h"
5 #include "btree_gc.h"
6 #include "btree_update.h"
7 #include "btree_update_interior.h"
8 #include "btree_io.h"
9 #include "buckets.h"
10 #include "dirent.h"
11 #include "ec.h"
12 #include "error.h"
13 #include "fsck.h"
14 #include "journal_io.h"
15 #include "quota.h"
16 #include "recovery.h"
17 #include "replicas.h"
18 #include "super-io.h"
19
20 #include <linux/stat.h>
21
22 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
23
24 static struct bkey_i *btree_root_find(struct bch_fs *c,
25                                       struct bch_sb_field_clean *clean,
26                                       struct jset *j,
27                                       enum btree_id id, unsigned *level)
28 {
29         struct bkey_i *k;
30         struct jset_entry *entry, *start, *end;
31
32         if (clean) {
33                 start = clean->start;
34                 end = vstruct_end(&clean->field);
35         } else {
36                 start = j->start;
37                 end = vstruct_last(j);
38         }
39
40         for (entry = start; entry < end; entry = vstruct_next(entry))
41                 if (entry->type == BCH_JSET_ENTRY_btree_root &&
42                     entry->btree_id == id)
43                         goto found;
44
45         return NULL;
46 found:
47         if (!entry->u64s)
48                 return ERR_PTR(-EINVAL);
49
50         k = entry->start;
51         *level = entry->level;
52         return k;
53 }
54
/*
 * Apply a single journal (or clean-shutdown superblock) entry that must take
 * effect before journal replay proper: btree roots and the serialized
 * filesystem usage counters.
 *
 * Returns 0 on success or an error from bch2_replicas_set_usage().
 */
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			/*
			 * Entry present but carries no key — record an error
			 * on the root; the caller decides how to handle it.
			 */
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		/*
		 * For usage entries, btree_id is overloaded as a usage-type
		 * tag and (for FS_USAGE_RESERVED) level as the replicas index.
		 */
		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			/* ignore out-of-range replica counts */
			if (entry->level < BCH_REPLICAS_MAX)
				percpu_u64_set(&c->usage[0]->
					       persistent_reserved[entry->level],
					       le64_to_cpu(u->v));
			break;
		case FS_USAGE_INODES:
			percpu_u64_set(&c->usage[0]->s.nr_inodes,
				       le64_to_cpu(u->v));
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);
		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	}

	return ret;
}
108
109 static int verify_superblock_clean(struct bch_fs *c,
110                                    struct bch_sb_field_clean *clean,
111                                    struct jset *j)
112 {
113         unsigned i;
114         int ret = 0;
115
116         if (!clean || !j)
117                 return 0;
118
119         if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
120                         "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
121                         le64_to_cpu(clean->journal_seq),
122                         le64_to_cpu(j->seq)))
123                 bch2_fs_mark_clean(c, false);
124
125         mustfix_fsck_err_on(j->read_clock != clean->read_clock, c,
126                         "superblock read clock doesn't match journal after clean shutdown");
127         mustfix_fsck_err_on(j->write_clock != clean->write_clock, c,
128                         "superblock read clock doesn't match journal after clean shutdown");
129
130         for (i = 0; i < BTREE_ID_NR; i++) {
131                 struct bkey_i *k1, *k2;
132                 unsigned l1 = 0, l2 = 0;
133
134                 k1 = btree_root_find(c, clean, NULL, i, &l1);
135                 k2 = btree_root_find(c, NULL, j, i, &l2);
136
137                 if (!k1 && !k2)
138                         continue;
139
140                 mustfix_fsck_err_on(!k1 || !k2 ||
141                                     IS_ERR(k1) ||
142                                     IS_ERR(k2) ||
143                                     k1->k.u64s != k2->k.u64s ||
144                                     memcmp(k1, k2, bkey_bytes(k1)) ||
145                                     l1 != l2, c,
146                         "superblock btree root doesn't match journal after clean shutdown");
147         }
148 fsck_err:
149         return ret;
150 }
151
152 static bool journal_empty(struct list_head *journal)
153 {
154         struct journal_replay *i;
155         struct jset_entry *entry;
156
157         if (list_empty(journal))
158                 return true;
159
160         i = list_last_entry(journal, struct journal_replay, list);
161
162         if (i->j.last_seq != i->j.seq)
163                 return false;
164
165         list_for_each_entry(i, journal, list) {
166                 vstruct_for_each(&i->j, entry) {
167                         if (entry->type == BCH_JSET_ENTRY_btree_root ||
168                             entry->type == BCH_JSET_ENTRY_usage ||
169                             entry->type == BCH_JSET_ENTRY_data_usage)
170                                 continue;
171
172                         if (entry->type == BCH_JSET_ENTRY_btree_keys &&
173                             !entry->u64s)
174                                 continue;
175                         return false;
176                 }
177         }
178
179         return true;
180 }
181
182 int bch2_fs_recovery(struct bch_fs *c)
183 {
184         const char *err = "cannot allocate memory";
185         struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL;
186         struct jset_entry *entry;
187         LIST_HEAD(journal);
188         struct jset *j = NULL;
189         unsigned i;
190         int ret;
191
192         mutex_lock(&c->sb_lock);
193         if (!c->replicas.entries) {
194                 bch_info(c, "building replicas info");
195                 set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
196         }
197
198         if (c->sb.clean)
199                 sb_clean = bch2_sb_get_clean(c->disk_sb.sb);
200         if (sb_clean) {
201                 clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
202                                 GFP_KERNEL);
203                 if (!clean) {
204                         ret = -ENOMEM;
205                         mutex_unlock(&c->sb_lock);
206                         goto err;
207                 }
208
209                 if (le16_to_cpu(c->disk_sb.sb->version) <
210                     bcachefs_metadata_version_bkey_renumber)
211                         bch2_sb_clean_renumber(clean, READ);
212         }
213         mutex_unlock(&c->sb_lock);
214
215         if (clean)
216                 bch_info(c, "recovering from clean shutdown, journal seq %llu",
217                          le64_to_cpu(clean->journal_seq));
218
219         if (!clean || c->opts.fsck) {
220                 ret = bch2_journal_read(c, &journal);
221                 if (ret)
222                         goto err;
223
224                 j = &list_entry(journal.prev, struct journal_replay, list)->j;
225         } else {
226                 ret = bch2_journal_set_seq(c,
227                                            le64_to_cpu(clean->journal_seq),
228                                            le64_to_cpu(clean->journal_seq));
229                 BUG_ON(ret);
230         }
231
232         ret = verify_superblock_clean(c, clean, j);
233         if (ret)
234                 goto err;
235
236         fsck_err_on(clean && !journal_empty(&journal), c,
237                     "filesystem marked clean but journal not empty");
238
239         err = "insufficient memory";
240         if (clean) {
241                 c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
242                 c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);
243
244                 for (entry = clean->start;
245                      entry != vstruct_end(&clean->field);
246                      entry = vstruct_next(entry)) {
247                         ret = journal_replay_entry_early(c, entry);
248                         if (ret)
249                                 goto err;
250                 }
251         } else {
252                 struct journal_replay *i;
253
254                 c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
255                 c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);
256
257                 list_for_each_entry(i, &journal, list)
258                         vstruct_for_each(&i->j, entry) {
259                                 ret = journal_replay_entry_early(c, entry);
260                                 if (ret)
261                                         goto err;
262                         }
263         }
264
265         bch2_fs_usage_initialize(c);
266
267         for (i = 0; i < BTREE_ID_NR; i++) {
268                 struct btree_root *r = &c->btree_roots[i];
269
270                 if (!r->alive)
271                         continue;
272
273                 err = "invalid btree root pointer";
274                 if (r->error)
275                         goto err;
276
277                 err = "error reading btree root";
278                 if (bch2_btree_root_read(c, i, &r->key, r->level)) {
279                         if (i != BTREE_ID_ALLOC)
280                                 goto err;
281
282                         mustfix_fsck_err(c, "error reading btree root");
283                 }
284         }
285
286         for (i = 0; i < BTREE_ID_NR; i++)
287                 if (!c->btree_roots[i].b)
288                         bch2_btree_root_alloc(c, i);
289
290         err = "error reading allocation information";
291         ret = bch2_alloc_read(c, &journal);
292         if (ret)
293                 goto err;
294
295         bch_verbose(c, "starting stripes_read");
296         ret = bch2_stripes_read(c, &journal);
297         if (ret)
298                 goto err;
299         bch_verbose(c, "stripes_read done");
300
301         set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
302
303         bch_verbose(c, "starting mark and sweep:");
304         err = "error in recovery";
305         ret = bch2_gc(c, &journal, true);
306         if (ret)
307                 goto err;
308         bch_verbose(c, "mark and sweep done");
309
310         clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
311
312         /*
313          * Skip past versions that might have possibly been used (as nonces),
314          * but hadn't had their pointers written:
315          */
316         if (c->sb.encryption_type && !c->sb.clean)
317                 atomic64_add(1 << 16, &c->key_version);
318
319         if (c->opts.noreplay)
320                 goto out;
321
322         /*
323          * Mark dirty before journal replay, fsck:
324          * XXX: after a clean shutdown, this could be done lazily only when fsck
325          * finds an error
326          */
327         bch2_fs_mark_clean(c, false);
328
329         /*
330          * bch2_fs_journal_start() can't happen sooner, or btree_gc_finish()
331          * will give spurious errors about oldest_gen > bucket_gen -
332          * this is a hack but oh well.
333          */
334         bch2_fs_journal_start(&c->journal);
335
336         err = "error starting allocator";
337         ret = bch2_fs_allocator_start(c);
338         if (ret)
339                 goto err;
340
341         bch_verbose(c, "starting journal replay:");
342         err = "journal replay failed";
343         ret = bch2_journal_replay(c, &journal);
344         if (ret)
345                 goto err;
346         bch_verbose(c, "journal replay done");
347
348         if (c->opts.norecovery)
349                 goto out;
350
351         err = "error in fsck";
352         ret = bch2_fsck(c);
353         if (ret)
354                 goto err;
355
356         mutex_lock(&c->sb_lock);
357         if (c->opts.version_upgrade) {
358                 if (c->sb.version < bcachefs_metadata_version_new_versioning)
359                         c->disk_sb.sb->version_min =
360                                 le16_to_cpu(bcachefs_metadata_version_min);
361                 c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
362         }
363
364         if (!test_bit(BCH_FS_FSCK_UNFIXED_ERRORS, &c->flags))
365                 c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;
366         mutex_unlock(&c->sb_lock);
367
368         if (enabled_qtypes(c)) {
369                 bch_verbose(c, "reading quotas:");
370                 ret = bch2_fs_quota_read(c);
371                 if (ret)
372                         goto err;
373                 bch_verbose(c, "quotas done");
374         }
375
376 out:
377         bch2_journal_entries_free(&journal);
378         kfree(clean);
379         return ret;
380 err:
381 fsck_err:
382         pr_err("Error in recovery: %s (%i)", err, ret);
383         goto out;
384 }
385
386 int bch2_fs_initialize(struct bch_fs *c)
387 {
388         struct bch_inode_unpacked root_inode, lostfound_inode;
389         struct bkey_inode_buf packed_inode;
390         struct bch_hash_info root_hash_info;
391         struct qstr lostfound = QSTR("lost+found");
392         const char *err = "cannot allocate memory";
393         struct bch_dev *ca;
394         LIST_HEAD(journal);
395         unsigned i;
396         int ret;
397
398         bch_notice(c, "initializing new filesystem");
399
400         mutex_lock(&c->sb_lock);
401         for_each_online_member(ca, c, i)
402                 bch2_mark_dev_superblock(c, ca, 0);
403         mutex_unlock(&c->sb_lock);
404
405         set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
406
407         for (i = 0; i < BTREE_ID_NR; i++)
408                 bch2_btree_root_alloc(c, i);
409
410         ret = bch2_gc(c, &journal, true);
411         if (ret)
412                 goto err;
413
414         err = "unable to allocate journal buckets";
415         for_each_online_member(ca, c, i)
416                 if (bch2_dev_journal_alloc(ca)) {
417                         percpu_ref_put(&ca->io_ref);
418                         goto err;
419                 }
420
421         /*
422          * journal_res_get() will crash if called before this has
423          * set up the journal.pin FIFO and journal.cur pointer:
424          */
425         bch2_fs_journal_start(&c->journal);
426         bch2_journal_set_replay_done(&c->journal);
427
428         err = "error starting allocator";
429         ret = bch2_fs_allocator_start(c);
430         if (ret)
431                 goto err;
432
433         bch2_inode_init(c, &root_inode, 0, 0,
434                         S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
435         root_inode.bi_inum = BCACHEFS_ROOT_INO;
436         root_inode.bi_nlink++; /* lost+found */
437         bch2_inode_pack(&packed_inode, &root_inode);
438
439         err = "error creating root directory";
440         ret = bch2_btree_insert(c, BTREE_ID_INODES,
441                                 &packed_inode.inode.k_i,
442                                 NULL, NULL, 0);
443         if (ret)
444                 goto err;
445
446         bch2_inode_init(c, &lostfound_inode, 0, 0,
447                         S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0,
448                         &root_inode);
449         lostfound_inode.bi_inum = BCACHEFS_ROOT_INO + 1;
450         bch2_inode_pack(&packed_inode, &lostfound_inode);
451
452         err = "error creating lost+found";
453         ret = bch2_btree_insert(c, BTREE_ID_INODES,
454                                 &packed_inode.inode.k_i,
455                                 NULL, NULL, 0);
456         if (ret)
457                 goto err;
458
459         root_hash_info = bch2_hash_info_init(c, &root_inode);
460
461         ret = bch2_dirent_create(c, BCACHEFS_ROOT_INO, &root_hash_info, DT_DIR,
462                                  &lostfound, lostfound_inode.bi_inum, NULL,
463                                  BTREE_INSERT_NOFAIL);
464         if (ret)
465                 goto err;
466
467         if (enabled_qtypes(c)) {
468                 ret = bch2_fs_quota_read(c);
469                 if (ret)
470                         goto err;
471         }
472
473         err = "error writing first journal entry";
474         ret = bch2_journal_meta(&c->journal);
475         if (ret)
476                 goto err;
477
478         mutex_lock(&c->sb_lock);
479         c->disk_sb.sb->version = c->disk_sb.sb->version_min =
480                 le16_to_cpu(bcachefs_metadata_version_current);
481         c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;
482
483         SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
484         SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
485
486         bch2_write_super(c);
487         mutex_unlock(&c->sb_lock);
488
489         return 0;
490 err:
491         pr_err("Error initializing new filesystem: %s (%i)", err, ret);
492         return ret;
493 }