bcachefs: More improvements for alloc info checks
fs/bcachefs/journal_io.c

// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "trace.h"

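/*
 * Code for reading the journal at mount time and writing new journal
 * entries at runtime: validating and deduplicating entries read from each
 * device, and allocating space on and issuing writes to journal buckets.
 */
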
static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

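/*
 * One journal_list is shared by all the per-device readers kicked off by
 * bch2_journal_read(); the mutex guards the list of journal_replay entries
 * that journal_entry_add() builds up:
 */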
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct journal_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

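/*
 * The nonce for checksumming/encrypting a jset is derived from its
 * sequence number, so no separate per-entry nonce needs to be stored:
 */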
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

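/*
 * Non-fatal results from jset_validate(), consumed by journal_read_bucket():
 * NONE means the magic didn't match (not a journal entry at all), REREAD
 * means the entry is bigger than what's been read into the buffer so far,
 * and BAD means the entry is corrupt but can be skipped past:
 */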
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

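/*
 * journal_validate_key() repairs in place: a key that fails validation is
 * cut out of its entry (and the remainder nulled out), and FSCK_DELETED_KEY
 * tells the caller to re-examine the current position instead of advancing:
 */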
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), &buf)) {
		printbuf_reset(&buf);
		pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
		       type, where,
		       (u64 *) k - entry->_data,
		       le16_to_cpu(entry->u64s));
		pr_newline(&buf);
		pr_indent_push(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		pr_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
	       le64_to_cpu(u->buckets_ec),
	       le64_to_cpu(u->buckets_unavailable));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

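/*
 * Per-entry-type dispatch table: the BCH_JSET_ENTRY_TYPES() x-macro
 * generates a {validate, to_text} ops pair for each jset entry type,
 * indexed by the entry's type field:
 */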
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
							    version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

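/*
 * Validate a whole jset: @ca is NULL when we're validating an entry we're
 * about to write (see jset_validate_for_write()), in which case there's no
 * originating device/sector to report and checksum verification is skipped:
 */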
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0, decrypt_err;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	/*
	 * Decryption errors are fatal, but don't let the return of
	 * bch2_encrypt() clobber a JOURNAL_ENTRY_BAD status in @ret from the
	 * checksum check above:
	 */
	decrypt_err = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
				   jset->encrypted_start,
				   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(decrypt_err, c,
			     "error decrypting journal entry: %i", decrypt_err);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

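/*
 * Scan one journal bucket sector by sector: read a chunk into @buf,
 * validate the jset at the current offset, add it to the list, and advance
 * by the entry's size - or by a single block when the size field can't be
 * trusted:
 */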
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset += sectors;
		sectors_read -= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	list_for_each_entry(r, jlist->head, list) {
		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

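/*
 * Top level journal read: read from every device in parallel, then walk
 * the combined list to find the newest flush entry, drop blacklisted and
 * stale entries, complain about gaps in the sequence, and check that each
 * surviving entry's replicas are marked in the superblock:
 */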
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&buf1, c, p);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

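/*
 * Allocation for journal writes is done in two passes (see
 * journal_write_alloc()): first with each device's remaining space in its
 * current journal bucket, then again after advancing devices to their next
 * bucket, until enough replicas have been allocated:
 */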
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate journal space for a write, moving on to the
 * next journal bucket on a device if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

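/*
 * Journal write completion: record which devices the entry actually landed
 * on, update the on-disk sequence number watermarks, then bump
 * reservations.unwritten_idx with a cmpxchg loop so the next journal write
 * can go out:
 */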
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);

			bch2_reset_alloc_cursors(c);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq = seq;

	j->seq_ondisk = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	}

	spin_unlock(&j->lock);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

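/*
 * Entry point for writing out the current journal buffer. If flushing
 * isn't required yet, the entry is downgraded to a no-flush write
 * (JSET_NO_FLUSH set, last_seq of 0), skipping the FUA/preflush cost; on
 * the read side, no-flush entries newer than the last flush entry get
 * blacklisted:
 */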
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (bch2_journal_error(j) ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}