| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | |
| 3 | #include "bcachefs.h" |
| 4 | #include "alloc_background.h" |
| 5 | #include "bkey_buf.h" |
| 6 | #include "btree_journal_iter.h" |
| 7 | #include "btree_node_scan.h" |
| 8 | #include "btree_update.h" |
| 9 | #include "btree_update_interior.h" |
| 10 | #include "btree_io.h" |
| 11 | #include "buckets.h" |
| 12 | #include "dirent.h" |
| 13 | #include "disk_accounting.h" |
| 14 | #include "errcode.h" |
| 15 | #include "error.h" |
| 16 | #include "journal_io.h" |
| 17 | #include "journal_reclaim.h" |
| 18 | #include "journal_seq_blacklist.h" |
| 19 | #include "logged_ops.h" |
| 20 | #include "move.h" |
| 21 | #include "movinggc.h" |
| 22 | #include "namei.h" |
| 23 | #include "quota.h" |
| 24 | #include "rebalance.h" |
| 25 | #include "recovery.h" |
| 26 | #include "recovery_passes.h" |
| 27 | #include "replicas.h" |
| 28 | #include "sb-clean.h" |
| 29 | #include "sb-downgrade.h" |
| 30 | #include "snapshot.h" |
| 31 | #include "super-io.h" |
| 32 | |
| 33 | #include <linux/sort.h> |
| 34 | #include <linux/stat.h> |
| 35 | |
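/*
 * Record that a btree has lost data: flag it in the superblock's ext section
 * and schedule the recovery passes needed to repair/reconstruct it, appending
 * a description of what was scheduled to @msg.
 */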
| 36 | int bch2_btree_lost_data(struct bch_fs *c, |
| 37 | struct printbuf *msg, |
| 38 | enum btree_id btree) |
| 39 | { |
| 40 | u64 b = BIT_ULL(btree); |
| 41 | int ret = 0; |
| 42 | |
| 43 | mutex_lock(&c->sb_lock); |
| 44 | struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); |
| 45 | |
| 46 | if (!(c->sb.btrees_lost_data & b)) { |
| 47 | prt_printf(msg, "flagging btree "); |
| 48 | bch2_btree_id_to_text(msg, btree); |
| 49 | prt_printf(msg, " lost data\n"); |
| 50 | |
| 51 | ext->btrees_lost_data |= cpu_to_le64(b); |
| 52 | } |
| 53 | |
| 54 | /* Once we have runtime self healing for topology errors we won't need this: */ |
| 55 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret; |
| 56 | |
| 57 | /* Btree node accounting will be off: */ |
| 58 | __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); |
| 59 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret; |
| 60 | |
| 61 | #ifdef CONFIG_BCACHEFS_DEBUG |
	/*
	 * These errors are less serious and don't need to be corrected right
	 * away, but in debug mode we want the next fsck run to be clean:
	 */
| 66 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_lrus, 0) ?: ret; |
| 67 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0) ?: ret; |
| 68 | #endif |
| 69 | |
| 70 | switch (btree) { |
| 71 | case BTREE_ID_alloc: |
| 72 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; |
| 73 | |
| 74 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); |
| 75 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); |
| 76 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); |
| 77 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent); |
| 78 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); |
| 79 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); |
| 80 | goto out; |
| 81 | case BTREE_ID_backpointers: |
| 82 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_btree_backpointers, 0) ?: ret; |
| 83 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_extents_to_backpointers, 0) ?: ret; |
| 84 | goto out; |
| 85 | case BTREE_ID_need_discard: |
| 86 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; |
| 87 | goto out; |
| 88 | case BTREE_ID_freespace: |
| 89 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; |
| 90 | goto out; |
| 91 | case BTREE_ID_bucket_gens: |
| 92 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; |
| 93 | goto out; |
| 94 | case BTREE_ID_lru: |
| 95 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0) ?: ret; |
| 96 | goto out; |
| 97 | case BTREE_ID_accounting: |
| 98 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_allocations, 0) ?: ret; |
| 99 | goto out; |
| 100 | case BTREE_ID_snapshots: |
| 101 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_reconstruct_snapshots, 0) ?: ret; |
| 102 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret; |
| 103 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret; |
| 104 | goto out; |
| 105 | default: |
| 106 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret; |
| 107 | ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret; |
| 108 | goto out; |
| 109 | } |
| 110 | out: |
| 111 | bch2_write_super(c); |
| 112 | mutex_unlock(&c->sb_lock); |
| 113 | |
| 114 | return ret; |
| 115 | } |
| 116 | |
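/*
 * Throw away a btree entirely: mark its root as no longer alive and drop any
 * keys for it that were read from the journal, so it gets reconstructed from
 * scratch.
 */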
| 117 | static void kill_btree(struct bch_fs *c, enum btree_id btree) |
| 118 | { |
| 119 | bch2_btree_id_root(c, btree)->alive = false; |
| 120 | bch2_shoot_down_journal_keys(c, btree, 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); |
| 121 | } |
| 122 | |
| 123 | /* for -o reconstruct_alloc: */ |
| 124 | void bch2_reconstruct_alloc(struct bch_fs *c) |
| 125 | { |
| 126 | mutex_lock(&c->sb_lock); |
| 127 | struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); |
| 128 | |
| 129 | __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required); |
| 130 | __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required); |
| 131 | __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required); |
| 132 | __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required); |
| 133 | __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required); |
| 134 | |
| 135 | __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent); |
| 136 | __set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent); |
| 137 | __set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent); |
| 138 | |
| 139 | __set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent); |
| 140 | __set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent); |
| 141 | __set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent); |
| 142 | |
| 143 | __set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent); |
| 144 | __set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent); |
| 145 | __set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent); |
| 146 | __set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent); |
| 147 | |
| 148 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_to_missing_lru_entry, ext->errors_silent); |
| 149 | |
| 150 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); |
| 151 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); |
| 152 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); |
| 153 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent); |
| 154 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); |
| 155 | __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); |
| 156 | __set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent); |
| 157 | __set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent); |
| 158 | __set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent); |
| 159 | __set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent); |
| 160 | __set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent); |
| 161 | __set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent); |
| 162 | __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); |
| 163 | c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); |
| 164 | |
| 165 | c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); |
| 166 | |
| 167 | c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_no_alloc_info)); |
| 168 | |
| 169 | bch2_write_super(c); |
| 170 | mutex_unlock(&c->sb_lock); |
| 171 | |
| 172 | for (unsigned i = 0; i < btree_id_nr_alive(c); i++) |
| 173 | if (btree_id_is_alloc(i)) |
| 174 | kill_btree(c, i); |
| 175 | } |
| 176 | |
/*
 * Btree node pointers have a field to stash a pointer to the in-memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
| 182 | static void zero_out_btree_mem_ptr(struct journal_keys *keys) |
| 183 | { |
| 184 | darray_for_each(*keys, i) |
| 185 | if (i->k->k.type == KEY_TYPE_btree_ptr_v2) |
| 186 | bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0; |
| 187 | } |
| 188 | |
| 189 | /* journal replay: */ |
| 190 | |
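/*
 * Advance the replay position to @seq, dropping journal pins on everything
 * before it so that journal reclaim can make progress while we replay.
 */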
| 191 | static void replay_now_at(struct journal *j, u64 seq) |
| 192 | { |
| 193 | BUG_ON(seq < j->replay_journal_seq); |
| 194 | |
| 195 | seq = min(seq, j->replay_journal_seq_end); |
| 196 | |
| 197 | while (j->replay_journal_seq < seq) |
| 198 | bch2_journal_pin_put(j, j->replay_journal_seq++); |
| 199 | } |
| 200 | |
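/*
 * Replay a single accounting key: accounting keys are deltas, so if the key
 * already in the btree has a version >= this one the delta has already been
 * applied and we skip it; otherwise we accumulate into the existing key
 * before doing the update.
 */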
| 201 | static int bch2_journal_replay_accounting_key(struct btree_trans *trans, |
| 202 | struct journal_key *k) |
| 203 | { |
| 204 | struct btree_iter iter; |
| 205 | bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, |
| 206 | BTREE_MAX_DEPTH, k->level, |
| 207 | BTREE_ITER_intent); |
| 208 | int ret = bch2_btree_iter_traverse(trans, &iter); |
| 209 | if (ret) |
| 210 | goto out; |
| 211 | |
| 212 | struct bkey u; |
| 213 | struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u); |
| 214 | |
| 215 | /* Has this delta already been applied to the btree? */ |
| 216 | if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) { |
| 217 | ret = 0; |
| 218 | goto out; |
| 219 | } |
| 220 | |
| 221 | struct bkey_i *new = k->k; |
| 222 | if (old.k->type == KEY_TYPE_accounting) { |
| 223 | new = bch2_bkey_make_mut_noupdate(trans, bkey_i_to_s_c(k->k)); |
| 224 | ret = PTR_ERR_OR_ZERO(new); |
| 225 | if (ret) |
| 226 | goto out; |
| 227 | |
| 228 | bch2_accounting_accumulate(bkey_i_to_accounting(new), |
| 229 | bkey_s_c_to_accounting(old)); |
| 230 | } |
| 231 | |
| 232 | trans->journal_res.seq = k->journal_seq; |
| 233 | |
| 234 | ret = bch2_trans_update(trans, &iter, new, BTREE_TRIGGER_norun); |
| 235 | out: |
| 236 | bch2_trans_iter_exit(trans, &iter); |
| 237 | return ret; |
| 238 | } |
| 239 | |
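/*
 * Replay a single journal key at the btree position and level it was logged
 * for. If that level of the btree doesn't exist, keys read from the journal
 * are dropped (we're recovering from btree node scan), while keys created by
 * early repair force the btree to grow a level and the transaction to
 * restart.
 */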
| 240 | static int bch2_journal_replay_key(struct btree_trans *trans, |
| 241 | struct journal_key *k) |
| 242 | { |
| 243 | struct btree_iter iter; |
| 244 | unsigned iter_flags = |
| 245 | BTREE_ITER_intent| |
| 246 | BTREE_ITER_not_extents; |
| 247 | unsigned update_flags = BTREE_TRIGGER_norun; |
| 248 | int ret; |
| 249 | |
| 250 | if (k->overwritten) |
| 251 | return 0; |
| 252 | |
| 253 | trans->journal_res.seq = k->journal_seq; |
| 254 | |
| 255 | /* |
| 256 | * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to |
| 257 | * keep the key cache coherent with the underlying btree. Nothing |
| 258 | * besides the allocator is doing updates yet so we don't need key cache |
| 259 | * coherency for non-alloc btrees, and key cache fills for snapshots |
| 260 | * btrees use BTREE_ITER_filter_snapshots, which isn't available until |
| 261 | * the snapshots recovery pass runs. |
| 262 | */ |
| 263 | if (!k->level && k->btree_id == BTREE_ID_alloc) |
| 264 | iter_flags |= BTREE_ITER_cached; |
| 265 | else |
| 266 | update_flags |= BTREE_UPDATE_key_cache_reclaim; |
| 267 | |
| 268 | bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, |
| 269 | BTREE_MAX_DEPTH, k->level, |
| 270 | iter_flags); |
| 271 | ret = bch2_btree_iter_traverse(trans, &iter); |
| 272 | if (ret) |
| 273 | goto out; |
| 274 | |
| 275 | struct btree_path *path = btree_iter_path(trans, &iter); |
| 276 | if (unlikely(!btree_path_node(path, k->level))) { |
| 277 | struct bch_fs *c = trans->c; |
| 278 | |
| 279 | CLASS(printbuf, buf)(); |
| 280 | prt_str(&buf, "btree="); |
| 281 | bch2_btree_id_to_text(&buf, k->btree_id); |
| 282 | prt_printf(&buf, " level=%u ", k->level); |
| 283 | bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k->k)); |
| 284 | |
| 285 | if (!(c->recovery.passes_complete & (BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes)| |
| 286 | BIT_ULL(BCH_RECOVERY_PASS_check_topology)))) { |
| 287 | bch_err(c, "have key in journal replay for btree depth that does not exist, confused\n%s", |
| 288 | buf.buf); |
| 289 | ret = -EINVAL; |
| 290 | } |
| 291 | |
| 292 | if (!k->allocated) { |
| 293 | bch_notice(c, "dropping key in journal replay for depth that does not exist because we're recovering from scan\n%s", |
| 294 | buf.buf); |
| 295 | k->overwritten = true; |
| 296 | goto out; |
| 297 | } |
| 298 | |
| 299 | bch2_trans_iter_exit(trans, &iter); |
| 300 | bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, |
| 301 | BTREE_MAX_DEPTH, 0, iter_flags); |
| 302 | ret = bch2_btree_iter_traverse(trans, &iter) ?: |
| 303 | bch2_btree_increase_depth(trans, iter.path, 0) ?: |
| 304 | -BCH_ERR_transaction_restart_nested; |
| 305 | goto out; |
| 306 | } |
| 307 | |
| 308 | /* Must be checked with btree locked: */ |
| 309 | if (k->overwritten) |
| 310 | goto out; |
| 311 | |
| 312 | if (k->k->k.type == KEY_TYPE_accounting) { |
| 313 | struct bkey_i *n = bch2_trans_subbuf_alloc(trans, &trans->accounting, k->k->k.u64s); |
| 314 | ret = PTR_ERR_OR_ZERO(n); |
| 315 | if (ret) |
| 316 | goto out; |
| 317 | |
| 318 | bkey_copy(n, k->k); |
| 319 | goto out; |
| 320 | } |
| 321 | |
| 322 | ret = bch2_trans_update(trans, &iter, k->k, update_flags); |
| 323 | out: |
| 324 | bch2_trans_iter_exit(trans, &iter); |
| 325 | return ret; |
| 326 | } |
| 327 | |
| 328 | static int journal_sort_seq_cmp(const void *_l, const void *_r) |
| 329 | { |
| 330 | const struct journal_key *l = *((const struct journal_key **)_l); |
| 331 | const struct journal_key *r = *((const struct journal_key **)_r); |
| 332 | |
| 333 | /* |
	 * Map 0 to U64_MAX, so that keys with journal_seq == 0 come last
| 335 | * |
| 336 | * journal_seq == 0 means that the key comes from early repair, and |
| 337 | * should be inserted last so as to avoid overflowing the journal |
| 338 | */ |
| 339 | return cmp_int(l->journal_seq - 1, r->journal_seq - 1); |
| 340 | } |
| 341 | |
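/*
 * Main journal replay: accounting keys go first, since the write buffer must
 * not flush accounting until they've all been applied; everything else is
 * then replayed in btree order for locality, and keys that couldn't be
 * committed that way (journal deadlock risk) are retried in journal order,
 * unpinning journal entries as we go.
 */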
| 342 | int bch2_journal_replay(struct bch_fs *c) |
| 343 | { |
| 344 | struct journal_keys *keys = &c->journal_keys; |
| 345 | DARRAY(struct journal_key *) keys_sorted = { 0 }; |
| 346 | struct journal *j = &c->journal; |
| 347 | u64 start_seq = c->journal_replay_seq_start; |
	u64 end_seq = c->journal_replay_seq_end;
| 349 | struct btree_trans *trans = NULL; |
| 350 | bool immediate_flush = false; |
| 351 | int ret = 0; |
| 352 | |
| 353 | if (keys->nr) { |
| 354 | ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)", |
| 355 | keys->nr, start_seq, end_seq); |
| 356 | if (ret) |
| 357 | goto err; |
| 358 | } |
| 359 | |
| 360 | BUG_ON(!atomic_read(&keys->ref)); |
| 361 | |
| 362 | move_gap(keys, keys->nr); |
| 363 | trans = bch2_trans_get(c); |
| 364 | |
| 365 | /* |
| 366 | * Replay accounting keys first: we can't allow the write buffer to |
| 367 | * flush accounting keys until we're done |
| 368 | */ |
| 369 | darray_for_each(*keys, k) { |
| 370 | if (!(k->k->k.type == KEY_TYPE_accounting && !k->allocated)) |
| 371 | continue; |
| 372 | |
| 373 | cond_resched(); |
| 374 | |
| 375 | ret = commit_do(trans, NULL, NULL, |
| 376 | BCH_TRANS_COMMIT_no_enospc| |
| 377 | BCH_TRANS_COMMIT_journal_reclaim| |
| 378 | BCH_TRANS_COMMIT_skip_accounting_apply| |
| 379 | BCH_TRANS_COMMIT_no_journal_res| |
| 380 | BCH_WATERMARK_reclaim, |
| 381 | bch2_journal_replay_accounting_key(trans, k)); |
| 382 | if (bch2_fs_fatal_err_on(ret, c, "error replaying accounting; %s", bch2_err_str(ret))) |
| 383 | goto err; |
| 384 | |
| 385 | k->overwritten = true; |
| 386 | } |
| 387 | |
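	/*
	 * All accounting deltas have been applied; the write buffer may now
	 * flush accounting keys:
	 */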
| 388 | set_bit(BCH_FS_accounting_replay_done, &c->flags); |
| 389 | |
| 390 | /* |
| 391 | * First, attempt to replay keys in sorted order. This is more |
| 392 | * efficient - better locality of btree access - but some might fail if |
| 393 | * that would cause a journal deadlock. |
| 394 | */ |
| 395 | darray_for_each(*keys, k) { |
| 396 | cond_resched(); |
| 397 | |
| 398 | /* |
| 399 | * k->allocated means the key wasn't read in from the journal, |
| 400 | * rather it was from early repair code |
| 401 | */ |
| 402 | if (k->allocated) |
| 403 | immediate_flush = true; |
| 404 | |
| 405 | /* Skip fastpath if we're low on space in the journal */ |
| 406 | ret = c->journal.watermark ? -1 : |
| 407 | commit_do(trans, NULL, NULL, |
| 408 | BCH_TRANS_COMMIT_no_enospc| |
| 409 | BCH_TRANS_COMMIT_journal_reclaim| |
| 410 | BCH_TRANS_COMMIT_skip_accounting_apply| |
| 411 | (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0), |
| 412 | bch2_journal_replay_key(trans, k)); |
| 413 | BUG_ON(!ret && !k->overwritten && k->k->k.type != KEY_TYPE_accounting); |
| 414 | if (ret) { |
| 415 | ret = darray_push(&keys_sorted, k); |
| 416 | if (ret) |
| 417 | goto err; |
| 418 | } |
| 419 | } |
| 420 | |
| 421 | bch2_trans_unlock_long(trans); |
| 422 | /* |
| 423 | * Now, replay any remaining keys in the order in which they appear in |
| 424 | * the journal, unpinning those journal entries as we go: |
| 425 | */ |
| 426 | sort_nonatomic(keys_sorted.data, keys_sorted.nr, |
| 427 | sizeof(keys_sorted.data[0]), |
| 428 | journal_sort_seq_cmp, NULL); |
| 429 | |
| 430 | darray_for_each(keys_sorted, kp) { |
| 431 | cond_resched(); |
| 432 | |
| 433 | struct journal_key *k = *kp; |
| 434 | |
| 435 | if (k->journal_seq) |
| 436 | replay_now_at(j, k->journal_seq); |
| 437 | else |
| 438 | replay_now_at(j, j->replay_journal_seq_end); |
| 439 | |
| 440 | ret = commit_do(trans, NULL, NULL, |
| 441 | BCH_TRANS_COMMIT_no_enospc| |
| 442 | BCH_TRANS_COMMIT_skip_accounting_apply| |
| 443 | (!k->allocated |
| 444 | ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim |
| 445 | : 0), |
| 446 | bch2_journal_replay_key(trans, k)); |
| 447 | if (ret) { |
| 448 | struct printbuf buf = PRINTBUF; |
| 449 | bch2_btree_id_level_to_text(&buf, k->btree_id, k->level); |
| 450 | bch_err_msg(c, ret, "while replaying key at %s:", buf.buf); |
| 451 | printbuf_exit(&buf); |
| 452 | goto err; |
| 453 | } |
| 454 | |
| 455 | BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten); |
| 456 | } |
| 457 | |
| 458 | /* |
| 459 | * We need to put our btree_trans before calling flush_all_pins(), since |
| 460 | * that will use a btree_trans internally |
| 461 | */ |
| 462 | bch2_trans_put(trans); |
| 463 | trans = NULL; |
| 464 | |
| 465 | if (!c->opts.retain_recovery_info && |
| 466 | c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay) |
| 467 | bch2_journal_keys_put_initial(c); |
| 468 | |
| 469 | replay_now_at(j, j->replay_journal_seq_end); |
| 470 | j->replay_journal_seq = 0; |
| 471 | |
| 472 | bch2_journal_set_replay_done(j); |
| 473 | |
| 474 | /* if we did any repair, flush it immediately */ |
| 475 | if (immediate_flush) { |
| 476 | bch2_journal_flush_all_pins(&c->journal); |
| 477 | ret = bch2_journal_meta(&c->journal); |
| 478 | } |
| 479 | |
| 480 | if (keys->nr) |
| 481 | bch2_journal_log_msg(c, "journal replay finished"); |
| 482 | err: |
| 483 | if (trans) |
| 484 | bch2_trans_put(trans); |
| 485 | darray_exit(&keys_sorted); |
| 486 | bch_err_fn(c, ret); |
| 487 | return ret; |
| 488 | } |
| 489 | |
| 490 | /* journal replay early: */ |
| 491 | |
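/*
 * Apply a journal entry that carries filesystem-wide state - btree roots,
 * usage counters, blacklisted journal sequence numbers, IO clocks - before
 * the main key-by-key journal replay runs.
 */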
| 492 | static int journal_replay_entry_early(struct bch_fs *c, |
| 493 | struct jset_entry *entry) |
| 494 | { |
| 495 | int ret = 0; |
| 496 | |
| 497 | switch (entry->type) { |
	case BCH_JSET_ENTRY_btree_root: {
| 500 | if (unlikely(!entry->u64s)) |
| 501 | return 0; |
| 502 | |
| 503 | if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX, |
| 504 | c, invalid_btree_id, |
| 505 | "invalid btree id %u (max %u)", |
| 506 | entry->btree_id, BTREE_ID_NR_MAX)) |
| 507 | return 0; |
| 508 | |
| 509 | while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) { |
| 510 | ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL }); |
| 511 | if (ret) |
| 512 | return ret; |
| 513 | } |
| 514 | |
| 515 | struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); |
| 516 | |
| 517 | r->level = entry->level; |
| 518 | bkey_copy(&r->key, (struct bkey_i *) entry->start); |
| 519 | r->error = 0; |
| 520 | r->alive = true; |
| 521 | break; |
| 522 | } |
| 523 | case BCH_JSET_ENTRY_usage: { |
| 524 | struct jset_entry_usage *u = |
| 525 | container_of(entry, struct jset_entry_usage, entry); |
| 526 | |
| 527 | switch (entry->btree_id) { |
| 528 | case BCH_FS_USAGE_key_version: |
| 529 | atomic64_set(&c->key_version, le64_to_cpu(u->v)); |
| 530 | break; |
| 531 | } |
| 532 | break; |
| 533 | } |
| 534 | case BCH_JSET_ENTRY_blacklist: { |
| 535 | struct jset_entry_blacklist *bl_entry = |
| 536 | container_of(entry, struct jset_entry_blacklist, entry); |
| 537 | |
| 538 | ret = bch2_journal_seq_blacklist_add(c, |
| 539 | le64_to_cpu(bl_entry->seq), |
| 540 | le64_to_cpu(bl_entry->seq) + 1); |
| 541 | break; |
| 542 | } |
| 543 | case BCH_JSET_ENTRY_blacklist_v2: { |
| 544 | struct jset_entry_blacklist_v2 *bl_entry = |
| 545 | container_of(entry, struct jset_entry_blacklist_v2, entry); |
| 546 | |
| 547 | ret = bch2_journal_seq_blacklist_add(c, |
| 548 | le64_to_cpu(bl_entry->start), |
| 549 | le64_to_cpu(bl_entry->end) + 1); |
| 550 | break; |
| 551 | } |
| 552 | case BCH_JSET_ENTRY_clock: { |
| 553 | struct jset_entry_clock *clock = |
| 554 | container_of(entry, struct jset_entry_clock, entry); |
| 555 | |
| 556 | atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time)); |
| 557 | } |
| 558 | } |
| 559 | fsck_err: |
| 560 | return ret; |
| 561 | } |
| 562 | |
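/*
 * Walk either the superblock clean section (after a clean shutdown) or the
 * journal entries we read (after an unclean shutdown), applying each entry
 * via journal_replay_entry_early().
 */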
| 563 | static int journal_replay_early(struct bch_fs *c, |
| 564 | struct bch_sb_field_clean *clean) |
| 565 | { |
| 566 | if (clean) { |
| 567 | for (struct jset_entry *entry = clean->start; |
| 568 | entry != vstruct_end(&clean->field); |
| 569 | entry = vstruct_next(entry)) { |
| 570 | int ret = journal_replay_entry_early(c, entry); |
| 571 | if (ret) |
| 572 | return ret; |
| 573 | } |
| 574 | } else { |
| 575 | struct genradix_iter iter; |
| 576 | struct journal_replay *i, **_i; |
| 577 | |
| 578 | genradix_for_each(&c->journal_entries, iter, _i) { |
| 579 | i = *_i; |
| 580 | |
| 581 | if (journal_replay_ignore(i)) |
| 582 | continue; |
| 583 | |
| 584 | vstruct_for_each(&i->j, entry) { |
| 585 | int ret = journal_replay_entry_early(c, entry); |
| 586 | if (ret) |
| 587 | return ret; |
| 588 | } |
| 589 | } |
| 590 | } |
| 591 | |
| 592 | return 0; |
| 593 | } |
| 594 | |
/* btree roots: */
| 596 | |
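/*
 * Read in the btree roots noted by journal_replay_entry_early(): a missing or
 * unreadable root is an fsck error, and any btree left without a usable root
 * gets a fake empty one so that later recovery passes can reconstruct it.
 */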
| 597 | static int read_btree_roots(struct bch_fs *c) |
| 598 | { |
| 599 | struct printbuf buf = PRINTBUF; |
| 600 | int ret = 0; |
| 601 | |
| 602 | for (unsigned i = 0; i < btree_id_nr_alive(c); i++) { |
| 603 | struct btree_root *r = bch2_btree_id_root(c, i); |
| 604 | |
| 605 | if (!r->alive) |
| 606 | continue; |
| 607 | |
| 608 | printbuf_reset(&buf); |
| 609 | bch2_btree_id_level_to_text(&buf, i, r->level); |
| 610 | |
| 611 | if (mustfix_fsck_err_on((ret = r->error), |
| 612 | c, btree_root_bkey_invalid, |
| 613 | "invalid btree root %s", |
| 614 | buf.buf) || |
| 615 | mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)), |
| 616 | c, btree_root_read_error, |
| 617 | "error reading btree root %s: %s", |
| 618 | buf.buf, bch2_err_str(ret))) { |
| 619 | if (btree_id_is_alloc(i)) |
| 620 | r->error = 0; |
| 621 | ret = 0; |
| 622 | } |
| 623 | } |
| 624 | |
| 625 | for (unsigned i = 0; i < BTREE_ID_NR; i++) { |
| 626 | struct btree_root *r = bch2_btree_id_root(c, i); |
| 627 | |
| 628 | if (!r->b && !r->error) { |
| 629 | r->alive = false; |
| 630 | r->level = 0; |
| 631 | bch2_btree_root_alloc_fake(c, i, 0); |
| 632 | } |
| 633 | } |
| 634 | fsck_err: |
| 635 | printbuf_exit(&buf); |
| 636 | return ret; |
| 637 | } |
| 638 | |
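/*
 * Decide whether to upgrade the on-disk format version, based on the
 * version_upgrade option and bcachefs_metadata_required_upgrade_below;
 * returns true if the superblock was modified and needs to be written.
 */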
| 639 | static bool check_version_upgrade(struct bch_fs *c) |
| 640 | { |
| 641 | unsigned latest_version = bcachefs_metadata_version_current; |
| 642 | unsigned latest_compatible = min(latest_version, |
| 643 | bch2_latest_compatible_version(c->sb.version)); |
| 644 | unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version; |
| 645 | unsigned new_version = 0; |
| 646 | bool ret = false; |
| 647 | |
| 648 | if (old_version < bcachefs_metadata_required_upgrade_below) { |
| 649 | if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible || |
| 650 | latest_compatible < bcachefs_metadata_required_upgrade_below) |
| 651 | new_version = latest_version; |
| 652 | else |
| 653 | new_version = latest_compatible; |
| 654 | } else { |
| 655 | switch (c->opts.version_upgrade) { |
| 656 | case BCH_VERSION_UPGRADE_compatible: |
| 657 | new_version = latest_compatible; |
| 658 | break; |
| 659 | case BCH_VERSION_UPGRADE_incompatible: |
| 660 | new_version = latest_version; |
| 661 | break; |
| 662 | case BCH_VERSION_UPGRADE_none: |
| 663 | new_version = min(old_version, latest_version); |
| 664 | break; |
| 665 | } |
| 666 | } |
| 667 | |
| 668 | if (new_version > old_version) { |
| 669 | struct printbuf buf = PRINTBUF; |
| 670 | |
| 671 | if (old_version < bcachefs_metadata_required_upgrade_below) |
| 672 | prt_str(&buf, "Version upgrade required:\n"); |
| 673 | |
| 674 | if (old_version != c->sb.version) { |
| 675 | prt_str(&buf, "Version upgrade from "); |
| 676 | bch2_version_to_text(&buf, c->sb.version_upgrade_complete); |
| 677 | prt_str(&buf, " to "); |
| 678 | bch2_version_to_text(&buf, c->sb.version); |
| 679 | prt_str(&buf, " incomplete\n"); |
| 680 | } |
| 681 | |
| 682 | prt_printf(&buf, "Doing %s version upgrade from ", |
| 683 | BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version) |
| 684 | ? "incompatible" : "compatible"); |
| 685 | bch2_version_to_text(&buf, old_version); |
| 686 | prt_str(&buf, " to "); |
| 687 | bch2_version_to_text(&buf, new_version); |
| 688 | prt_newline(&buf); |
| 689 | |
| 690 | struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); |
| 691 | __le64 passes = ext->recovery_passes_required[0]; |
| 692 | bch2_sb_set_upgrade(c, old_version, new_version); |
| 693 | passes = ext->recovery_passes_required[0] & ~passes; |
| 694 | |
| 695 | if (passes) { |
| 696 | prt_str(&buf, " running recovery passes: "); |
| 697 | prt_bitflags(&buf, bch2_recovery_passes, |
| 698 | bch2_recovery_passes_from_stable(le64_to_cpu(passes))); |
| 699 | } |
| 700 | |
| 701 | bch_notice(c, "%s", buf.buf); |
| 702 | printbuf_exit(&buf); |
| 703 | |
| 704 | ret = true; |
| 705 | } |
| 706 | |
| 707 | if (new_version > c->sb.version_incompat_allowed && |
| 708 | c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible) { |
| 709 | struct printbuf buf = PRINTBUF; |
| 710 | |
| 711 | prt_str(&buf, "Now allowing incompatible features up to "); |
| 712 | bch2_version_to_text(&buf, new_version); |
| 713 | prt_str(&buf, ", previously allowed up to "); |
| 714 | bch2_version_to_text(&buf, c->sb.version_incompat_allowed); |
| 715 | prt_newline(&buf); |
| 716 | |
| 717 | bch_notice(c, "%s", buf.buf); |
| 718 | printbuf_exit(&buf); |
| 719 | |
| 720 | ret = true; |
| 721 | } |
| 722 | |
| 723 | if (ret) |
| 724 | bch2_sb_upgrade(c, new_version, |
| 725 | c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible); |
| 726 | |
| 727 | return ret; |
| 728 | } |
| 729 | |
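/*
 * Filesystem recovery entry point: read the journal (or the superblock clean
 * section), work out which recovery/fsck passes need to run, replay the
 * journal and run those passes, then record the results in the superblock.
 */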
| 730 | int bch2_fs_recovery(struct bch_fs *c) |
| 731 | { |
| 732 | struct bch_sb_field_clean *clean = NULL; |
| 733 | struct jset *last_journal_entry = NULL; |
| 734 | u64 last_seq = 0, blacklist_seq, journal_seq; |
| 735 | int ret = 0; |
| 736 | |
| 737 | if (c->sb.clean) { |
| 738 | clean = bch2_read_superblock_clean(c); |
| 739 | ret = PTR_ERR_OR_ZERO(clean); |
| 740 | if (ret) |
| 741 | goto err; |
| 742 | |
| 743 | bch_info(c, "recovering from clean shutdown, journal seq %llu", |
| 744 | le64_to_cpu(clean->journal_seq)); |
| 745 | } else { |
| 746 | bch_info(c, "recovering from unclean shutdown"); |
| 747 | } |
| 748 | |
| 749 | if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) { |
| 750 | bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported"); |
| 751 | ret = -EINVAL; |
| 752 | goto err; |
| 753 | } |
| 754 | |
| 755 | if (!c->sb.clean && |
| 756 | !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) { |
| 757 | bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix"); |
| 758 | ret = -EINVAL; |
| 759 | goto err; |
| 760 | } |
| 761 | |
| 762 | if (c->opts.norecovery) { |
| 763 | c->opts.recovery_pass_last = c->opts.recovery_pass_last |
| 764 | ? min(c->opts.recovery_pass_last, BCH_RECOVERY_PASS_snapshots_read) |
| 765 | : BCH_RECOVERY_PASS_snapshots_read; |
| 766 | c->opts.nochanges = true; |
| 767 | } |
| 768 | |
| 769 | if (c->opts.nochanges) |
| 770 | c->opts.read_only = true; |
| 771 | |
| 772 | if (c->opts.journal_rewind) { |
| 773 | bch_info(c, "rewinding journal, fsck required"); |
| 774 | c->opts.fsck = true; |
| 775 | } |
| 776 | |
| 777 | if (go_rw_in_recovery(c)) { |
| 778 | /* |
| 779 | * start workqueues/kworkers early - kthread creation checks for |
| 780 | * pending signals, which is _very_ annoying |
| 781 | */ |
| 782 | ret = bch2_fs_init_rw(c); |
| 783 | if (ret) |
| 784 | goto err; |
| 785 | } |
| 786 | |
| 787 | mutex_lock(&c->sb_lock); |
| 788 | struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); |
| 789 | bool write_sb = false; |
| 790 | |
| 791 | if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) { |
| 792 | ext->recovery_passes_required[0] |= |
| 793 | cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology))); |
| 794 | write_sb = true; |
| 795 | } |
| 796 | |
| 797 | u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); |
| 798 | if (sb_passes) { |
| 799 | struct printbuf buf = PRINTBUF; |
| 800 | prt_str(&buf, "superblock requires following recovery passes to be run:\n "); |
| 801 | prt_bitflags(&buf, bch2_recovery_passes, sb_passes); |
| 802 | bch_info(c, "%s", buf.buf); |
| 803 | printbuf_exit(&buf); |
| 804 | } |
| 805 | |
| 806 | if (bch2_check_version_downgrade(c)) { |
| 807 | struct printbuf buf = PRINTBUF; |
| 808 | |
| 809 | prt_str(&buf, "Version downgrade required:"); |
| 810 | |
| 811 | __le64 passes = ext->recovery_passes_required[0]; |
| 812 | bch2_sb_set_downgrade(c, |
| 813 | BCH_VERSION_MINOR(bcachefs_metadata_version_current), |
| 814 | BCH_VERSION_MINOR(c->sb.version)); |
| 815 | passes = ext->recovery_passes_required[0] & ~passes; |
| 816 | if (passes) { |
| 817 | prt_str(&buf, "\n running recovery passes: "); |
| 818 | prt_bitflags(&buf, bch2_recovery_passes, |
| 819 | bch2_recovery_passes_from_stable(le64_to_cpu(passes))); |
| 820 | } |
| 821 | |
| 822 | bch_info(c, "%s", buf.buf); |
| 823 | printbuf_exit(&buf); |
| 824 | write_sb = true; |
| 825 | } |
| 826 | |
| 827 | if (check_version_upgrade(c)) |
| 828 | write_sb = true; |
| 829 | |
| 830 | c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); |
| 831 | |
| 832 | if (c->sb.version_upgrade_complete < bcachefs_metadata_version_autofix_errors) { |
| 833 | SET_BCH_SB_ERROR_ACTION(c->disk_sb.sb, BCH_ON_ERROR_fix_safe); |
| 834 | write_sb = true; |
| 835 | } |
| 836 | |
| 837 | if (write_sb) |
| 838 | bch2_write_super(c); |
| 839 | mutex_unlock(&c->sb_lock); |
| 840 | |
| 841 | if (c->sb.clean) |
| 842 | set_bit(BCH_FS_clean_recovery, &c->flags); |
| 843 | if (c->opts.fsck) |
| 844 | set_bit(BCH_FS_in_fsck, &c->flags); |
| 845 | set_bit(BCH_FS_in_recovery, &c->flags); |
| 846 | |
| 847 | ret = bch2_blacklist_table_initialize(c); |
| 848 | if (ret) { |
| 849 | bch_err(c, "error initializing blacklist table"); |
| 850 | goto err; |
| 851 | } |
| 852 | |
| 853 | bch2_journal_pos_from_member_info_resume(c); |
| 854 | |
| 855 | if (!c->sb.clean || c->opts.retain_recovery_info) { |
| 856 | struct genradix_iter iter; |
| 857 | struct journal_replay **i; |
| 858 | |
| 859 | bch_verbose(c, "starting journal read"); |
| 860 | ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq); |
| 861 | if (ret) |
| 862 | goto err; |
| 863 | |
| 864 | /* |
| 865 | * note: cmd_list_journal needs the blacklist table fully up to date so |
| 866 | * it can asterisk ignored journal entries: |
| 867 | */ |
| 868 | if (c->opts.read_journal_only) |
| 869 | goto out; |
| 870 | |
| 871 | genradix_for_each_reverse(&c->journal_entries, iter, i) |
| 872 | if (!journal_replay_ignore(*i)) { |
| 873 | last_journal_entry = &(*i)->j; |
| 874 | break; |
| 875 | } |
| 876 | |
| 877 | if (mustfix_fsck_err_on(c->sb.clean && |
| 878 | last_journal_entry && |
| 879 | !journal_entry_empty(last_journal_entry), c, |
| 880 | clean_but_journal_not_empty, |
| 881 | "filesystem marked clean but journal not empty")) { |
| 882 | c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); |
| 883 | SET_BCH_SB_CLEAN(c->disk_sb.sb, false); |
| 884 | c->sb.clean = false; |
| 885 | } |
| 886 | |
| 887 | if (!last_journal_entry) { |
| 888 | fsck_err_on(!c->sb.clean, c, |
| 889 | dirty_but_no_journal_entries, |
| 890 | "no journal entries found"); |
| 891 | if (clean) |
| 892 | goto use_clean; |
| 893 | |
| 894 | genradix_for_each_reverse(&c->journal_entries, iter, i) |
| 895 | if (*i) { |
| 896 | last_journal_entry = &(*i)->j; |
| 897 | (*i)->ignore_blacklisted = false; |
					(*i)->ignore_not_dirty = false;
| 899 | /* |
| 900 | * This was probably a NO_FLUSH entry, |
| 901 | * so last_seq was garbage - but we know |
| 902 | * we're only using a single journal |
| 903 | * entry, set it here: |
| 904 | */ |
| 905 | (*i)->j.last_seq = (*i)->j.seq; |
| 906 | break; |
| 907 | } |
| 908 | } |
| 909 | |
| 910 | ret = bch2_journal_keys_sort(c); |
| 911 | if (ret) |
| 912 | goto err; |
| 913 | |
| 914 | if (c->sb.clean && last_journal_entry) { |
| 915 | ret = bch2_verify_superblock_clean(c, &clean, |
| 916 | last_journal_entry); |
| 917 | if (ret) |
| 918 | goto err; |
| 919 | } |
| 920 | } else { |
| 921 | use_clean: |
| 922 | if (!clean) { |
| 923 | bch_err(c, "no superblock clean section found"); |
| 924 | ret = bch_err_throw(c, fsck_repair_impossible); |
| 925 | goto err; |
		}
| 928 | blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1; |
| 929 | } |
| 930 | |
| 931 | c->journal_replay_seq_start = last_seq; |
| 932 | c->journal_replay_seq_end = blacklist_seq - 1; |
| 933 | |
| 934 | zero_out_btree_mem_ptr(&c->journal_keys); |
| 935 | |
| 936 | ret = journal_replay_early(c, clean); |
| 937 | if (ret) |
| 938 | goto err; |
| 939 | |
| 940 | ret = bch2_fs_resize_on_mount(c); |
| 941 | if (ret) { |
| 942 | up_write(&c->state_lock); |
| 943 | goto err; |
| 944 | } |
| 945 | |
| 946 | if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) { |
| 947 | bch_info(c, "filesystem is an unresized image file, mounting ro"); |
| 948 | c->opts.read_only = true; |
| 949 | } |
| 950 | |
| 951 | if (!c->opts.read_only && |
| 952 | (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))) { |
| 953 | bch_info(c, "mounting a filesystem with no alloc info read-write; will recreate"); |
| 954 | |
| 955 | bch2_reconstruct_alloc(c); |
| 956 | } else if (c->opts.reconstruct_alloc) { |
| 957 | bch2_journal_log_msg(c, "dropping alloc info"); |
| 958 | bch_info(c, "dropping and reconstructing all alloc info"); |
| 959 | |
| 960 | bch2_reconstruct_alloc(c); |
| 961 | } |
| 962 | |
| 963 | if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) { |
| 964 | /* We can't go RW to fix errors without alloc info */ |
| 965 | if (c->opts.fix_errors == FSCK_FIX_yes || |
| 966 | c->opts.fix_errors == FSCK_FIX_ask) |
| 967 | c->opts.fix_errors = FSCK_FIX_no; |
| 968 | if (c->opts.errors == BCH_ON_ERROR_fix_safe) |
| 969 | c->opts.errors = BCH_ON_ERROR_continue; |
| 970 | } |
| 971 | |
| 972 | /* |
	 * After an unclean shutdown, skip the next few journal sequence
| 974 | * numbers as they may have been referenced by btree writes that |
| 975 | * happened before their corresponding journal writes - those btree |
| 976 | * writes need to be ignored, by skipping and blacklisting the next few |
| 977 | * journal sequence numbers: |
| 978 | */ |
| 979 | if (!c->sb.clean) |
| 980 | journal_seq += JOURNAL_BUF_NR * 4; |
| 981 | |
| 982 | if (blacklist_seq != journal_seq) { |
| 983 | ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu", |
| 984 | blacklist_seq, journal_seq) ?: |
| 985 | bch2_journal_seq_blacklist_add(c, |
| 986 | blacklist_seq, journal_seq); |
| 987 | if (ret) { |
| 988 | bch_err_msg(c, ret, "error creating new journal seq blacklist entry"); |
| 989 | goto err; |
| 990 | } |
| 991 | } |
| 992 | |
| 993 | ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu", |
| 994 | journal_seq, last_seq, blacklist_seq - 1) ?: |
| 995 | bch2_fs_journal_start(&c->journal, last_seq, journal_seq); |
| 996 | if (ret) |
| 997 | goto err; |
| 998 | |
| 999 | /* |
	 * Skip past versions that might have been used (as nonces),
| 1001 | * but hadn't had their pointers written: |
| 1002 | */ |
| 1003 | if (c->sb.encryption_type && !c->sb.clean) |
| 1004 | atomic64_add(1 << 16, &c->key_version); |
| 1005 | |
| 1006 | ret = read_btree_roots(c); |
| 1007 | if (ret) |
| 1008 | goto err; |
| 1009 | |
| 1010 | set_bit(BCH_FS_btree_running, &c->flags); |
| 1011 | |
| 1012 | ret = bch2_sb_set_upgrade_extra(c); |
| 1013 | if (ret) |
| 1014 | goto err; |
| 1015 | |
| 1016 | ret = bch2_run_recovery_passes(c, 0); |
| 1017 | if (ret) |
| 1018 | goto err; |
| 1019 | |
| 1020 | /* |
| 1021 | * Normally set by the appropriate recovery pass: when cleared, this |
| 1022 | * indicates we're in early recovery and btree updates should be done by |
	 * being applied to the journal replay keys. _Must_ be set before
| 1024 | * multithreaded use: |
| 1025 | */ |
| 1026 | set_bit(BCH_FS_may_go_rw, &c->flags); |
| 1027 | clear_bit(BCH_FS_in_fsck, &c->flags); |
| 1028 | |
| 1029 | /* in case we don't run journal replay, i.e. norecovery mode */ |
| 1030 | set_bit(BCH_FS_accounting_replay_done, &c->flags); |
| 1031 | |
| 1032 | bch2_async_btree_node_rewrites_flush(c); |
| 1033 | |
| 1034 | /* fsync if we fixed errors */ |
| 1035 | if (test_bit(BCH_FS_errors_fixed, &c->flags)) { |
| 1036 | bch2_journal_flush_all_pins(&c->journal); |
| 1037 | bch2_journal_meta(&c->journal); |
| 1038 | } |
| 1039 | |
| 1040 | /* If we fixed errors, verify that fs is actually clean now: */ |
| 1041 | if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && |
| 1042 | test_bit(BCH_FS_errors_fixed, &c->flags) && |
| 1043 | !test_bit(BCH_FS_errors_not_fixed, &c->flags) && |
| 1044 | !test_bit(BCH_FS_error, &c->flags)) { |
| 1045 | bch2_flush_fsck_errs(c); |
| 1046 | |
| 1047 | bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean"); |
| 1048 | clear_bit(BCH_FS_errors_fixed, &c->flags); |
| 1049 | |
| 1050 | ret = bch2_run_recovery_passes(c, |
| 1051 | BCH_RECOVERY_PASS_check_alloc_info); |
| 1052 | if (ret) |
| 1053 | goto err; |
| 1054 | |
| 1055 | if (test_bit(BCH_FS_errors_fixed, &c->flags) || |
| 1056 | test_bit(BCH_FS_errors_not_fixed, &c->flags)) { |
| 1057 | bch_err(c, "Second fsck run was not clean"); |
| 1058 | set_bit(BCH_FS_errors_not_fixed, &c->flags); |
| 1059 | } |
| 1060 | |
| 1061 | set_bit(BCH_FS_errors_fixed, &c->flags); |
| 1062 | } |
| 1063 | |
| 1064 | if (enabled_qtypes(c)) { |
| 1065 | bch_verbose(c, "reading quotas"); |
| 1066 | ret = bch2_fs_quota_read(c); |
| 1067 | if (ret) |
| 1068 | goto err; |
| 1069 | bch_verbose(c, "quotas done"); |
| 1070 | } |
| 1071 | |
| 1072 | mutex_lock(&c->sb_lock); |
| 1073 | ext = bch2_sb_field_get(c->disk_sb.sb, ext); |
| 1074 | write_sb = false; |
| 1075 | |
| 1076 | if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) { |
| 1077 | SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version)); |
| 1078 | write_sb = true; |
| 1079 | } |
| 1080 | |
| 1081 | if (!test_bit(BCH_FS_error, &c->flags) && |
| 1082 | !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) { |
| 1083 | c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); |
| 1084 | write_sb = true; |
| 1085 | } |
| 1086 | |
| 1087 | if (!test_bit(BCH_FS_error, &c->flags) && |
| 1088 | !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) { |
| 1089 | memset(ext->errors_silent, 0, sizeof(ext->errors_silent)); |
| 1090 | write_sb = true; |
| 1091 | } |
| 1092 | |
| 1093 | if (c->opts.fsck && |
| 1094 | !test_bit(BCH_FS_error, &c->flags) && |
| 1095 | c->recovery.pass_done == BCH_RECOVERY_PASS_NR - 1 && |
| 1096 | ext->btrees_lost_data) { |
| 1097 | ext->btrees_lost_data = 0; |
| 1098 | write_sb = true; |
| 1099 | } |
| 1100 | |
| 1101 | if (c->opts.fsck && |
| 1102 | !test_bit(BCH_FS_error, &c->flags) && |
| 1103 | !test_bit(BCH_FS_errors_not_fixed, &c->flags)) { |
| 1104 | SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0); |
| 1105 | SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0); |
| 1106 | write_sb = true; |
| 1107 | } |
| 1108 | |
| 1109 | if (bch2_blacklist_entries_gc(c)) |
| 1110 | write_sb = true; |
| 1111 | |
| 1112 | if (write_sb) |
| 1113 | bch2_write_super(c); |
| 1114 | mutex_unlock(&c->sb_lock); |
| 1115 | |
| 1116 | if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) || |
| 1117 | c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) { |
| 1118 | struct bch_move_stats stats; |
| 1119 | |
| 1120 | bch2_move_stats_init(&stats, "recovery"); |
| 1121 | |
| 1122 | struct printbuf buf = PRINTBUF; |
| 1123 | bch2_version_to_text(&buf, c->sb.version_min); |
| 1124 | bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf); |
| 1125 | printbuf_exit(&buf); |
| 1126 | |
| 1127 | ret = bch2_fs_read_write_early(c) ?: |
| 1128 | bch2_scan_old_btree_nodes(c, &stats); |
| 1129 | if (ret) |
| 1130 | goto err; |
| 1131 | bch_info(c, "scanning for old btree nodes done"); |
| 1132 | } |
| 1133 | |
| 1134 | ret = 0; |
| 1135 | out: |
| 1136 | bch2_flush_fsck_errs(c); |
| 1137 | |
| 1138 | if (!ret && |
| 1139 | test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) && |
| 1140 | !c->opts.nochanges) { |
| 1141 | bch2_fs_read_write_early(c); |
| 1142 | bch2_delete_dead_snapshots_async(c); |
| 1143 | } |
| 1144 | |
| 1145 | bch_err_fn(c, ret); |
| 1146 | final_out: |
| 1147 | if (!IS_ERR(clean)) |
| 1148 | kfree(clean); |
| 1149 | return ret; |
| 1150 | err: |
| 1151 | fsck_err: |
| 1152 | { |
| 1153 | struct printbuf buf = PRINTBUF; |
| 1154 | bch2_log_msg_start(c, &buf); |
| 1155 | |
| 1156 | prt_printf(&buf, "error in recovery: %s\n", bch2_err_str(ret)); |
| 1157 | bch2_fs_emergency_read_only2(c, &buf); |
| 1158 | |
| 1159 | bch2_print_str(c, KERN_ERR, buf.buf); |
| 1160 | printbuf_exit(&buf); |
| 1161 | } |
| 1162 | goto final_out; |
| 1163 | } |
| 1164 | |
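/*
 * Initialize a brand new filesystem: set up the journal and empty btree
 * roots, mark superblocks and initialize free space, then create the root
 * directory, lost+found, and the initial subvolume/snapshot.
 */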
| 1165 | int bch2_fs_initialize(struct bch_fs *c) |
| 1166 | { |
| 1167 | struct bch_inode_unpacked root_inode, lostfound_inode; |
| 1168 | struct bkey_inode_buf packed_inode; |
| 1169 | struct qstr lostfound = QSTR("lost+found"); |
| 1170 | struct bch_member *m; |
| 1171 | int ret; |
| 1172 | |
| 1173 | bch_notice(c, "initializing new filesystem"); |
| 1174 | set_bit(BCH_FS_new_fs, &c->flags); |
| 1175 | |
| 1176 | mutex_lock(&c->sb_lock); |
| 1177 | c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); |
| 1178 | c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); |
| 1179 | |
| 1180 | bch2_check_version_downgrade(c); |
| 1181 | |
| 1182 | if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { |
| 1183 | bch2_sb_upgrade(c, bcachefs_metadata_version_current, false); |
| 1184 | SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); |
| 1185 | bch2_write_super(c); |
| 1186 | } |
| 1187 | |
| 1188 | for_each_member_device(c, ca) { |
| 1189 | m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); |
| 1190 | SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false); |
| 1191 | ca->mi = bch2_mi_to_cpu(m); |
| 1192 | } |
| 1193 | |
| 1194 | bch2_write_super(c); |
| 1195 | mutex_unlock(&c->sb_lock); |
| 1196 | |
| 1197 | set_bit(BCH_FS_btree_running, &c->flags); |
| 1198 | set_bit(BCH_FS_may_go_rw, &c->flags); |
| 1199 | |
| 1200 | for (unsigned i = 0; i < BTREE_ID_NR; i++) |
| 1201 | bch2_btree_root_alloc_fake(c, i, 0); |
| 1202 | |
| 1203 | ret = bch2_fs_journal_alloc(c); |
| 1204 | if (ret) |
| 1205 | goto err; |
| 1206 | |
| 1207 | /* |
| 1208 | * journal_res_get() will crash if called before this has |
| 1209 | * set up the journal.pin FIFO and journal.cur pointer: |
| 1210 | */ |
| 1211 | ret = bch2_fs_journal_start(&c->journal, 1, 1); |
| 1212 | if (ret) |
| 1213 | goto err; |
| 1214 | |
| 1215 | ret = bch2_fs_read_write_early(c); |
| 1216 | if (ret) |
| 1217 | goto err; |
| 1218 | |
| 1219 | set_bit(BCH_FS_accounting_replay_done, &c->flags); |
| 1220 | bch2_journal_set_replay_done(&c->journal); |
| 1221 | |
| 1222 | for_each_member_device(c, ca) { |
| 1223 | ret = bch2_dev_usage_init(ca, false); |
| 1224 | if (ret) { |
| 1225 | bch2_dev_put(ca); |
| 1226 | goto err; |
| 1227 | } |
| 1228 | } |
| 1229 | |
| 1230 | /* |
| 1231 | * Write out the superblock and journal buckets, now that we can do |
| 1232 | * btree updates |
| 1233 | */ |
| 1234 | bch_verbose(c, "marking superblocks"); |
| 1235 | ret = bch2_trans_mark_dev_sbs(c); |
| 1236 | bch_err_msg(c, ret, "marking superblocks"); |
| 1237 | if (ret) |
| 1238 | goto err; |
| 1239 | |
| 1240 | ret = bch2_fs_freespace_init(c); |
| 1241 | if (ret) |
| 1242 | goto err; |
| 1243 | |
| 1244 | ret = bch2_initialize_subvolumes(c); |
| 1245 | if (ret) |
| 1246 | goto err; |
| 1247 | |
| 1248 | bch_verbose(c, "reading snapshots table"); |
| 1249 | ret = bch2_snapshots_read(c); |
| 1250 | if (ret) |
| 1251 | goto err; |
| 1252 | bch_verbose(c, "reading snapshots done"); |
| 1253 | |
| 1254 | bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL); |
| 1255 | root_inode.bi_inum = BCACHEFS_ROOT_INO; |
| 1256 | root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; |
| 1257 | bch2_inode_pack(&packed_inode, &root_inode); |
| 1258 | packed_inode.inode.k.p.snapshot = U32_MAX; |
| 1259 | |
| 1260 | ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0, 0); |
| 1261 | bch_err_msg(c, ret, "creating root directory"); |
| 1262 | if (ret) |
| 1263 | goto err; |
| 1264 | |
| 1265 | bch2_inode_init_early(c, &lostfound_inode); |
| 1266 | |
| 1267 | ret = bch2_trans_commit_do(c, NULL, NULL, 0, |
| 1268 | bch2_create_trans(trans, |
| 1269 | BCACHEFS_ROOT_SUBVOL_INUM, |
| 1270 | &root_inode, &lostfound_inode, |
| 1271 | &lostfound, |
| 1272 | 0, 0, S_IFDIR|0700, 0, |
| 1273 | NULL, NULL, (subvol_inum) { 0 }, 0)); |
| 1274 | bch_err_msg(c, ret, "creating lost+found"); |
| 1275 | if (ret) |
| 1276 | goto err; |
| 1277 | |
| 1278 | c->recovery.pass_done = BCH_RECOVERY_PASS_NR - 1; |
| 1279 | |
| 1280 | bch2_copygc_wakeup(c); |
| 1281 | bch2_rebalance_wakeup(c); |
| 1282 | |
| 1283 | if (enabled_qtypes(c)) { |
| 1284 | ret = bch2_fs_quota_read(c); |
| 1285 | if (ret) |
| 1286 | goto err; |
| 1287 | } |
| 1288 | |
| 1289 | ret = bch2_journal_flush(&c->journal); |
| 1290 | bch_err_msg(c, ret, "writing first journal entry"); |
| 1291 | if (ret) |
| 1292 | goto err; |
| 1293 | |
| 1294 | mutex_lock(&c->sb_lock); |
| 1295 | SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); |
| 1296 | SET_BCH_SB_CLEAN(c->disk_sb.sb, false); |
| 1297 | |
| 1298 | bch2_write_super(c); |
| 1299 | mutex_unlock(&c->sb_lock); |
| 1300 | |
| 1301 | c->recovery.curr_pass = BCH_RECOVERY_PASS_NR; |
| 1302 | return 0; |
| 1303 | err: |
| 1304 | bch_err_fn(c, ret); |
| 1305 | return ret; |
| 1306 | } |