// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "trace.h"

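/*
 * Journal entries are kept in a genradix indexed by sequence number, offset
 * by journal_entries_base_seq; masking with ~0U >> 1 truncates the index to
 * 31 bits, which is safe given the assumption (see journal_entry_add() below)
 * that all sequence numbers fall within +-2 billion of the first one found.
 */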
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
	return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}

static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries,
			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

	BUG_ON(*p != i);
	*p = NULL;
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}

struct journal_list {
	struct closure		cl;
	u64			last_seq;
	struct mutex		lock;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	struct journal_ptr *ptr;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < jlist->last_seq)
		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

	/*
	 * genradixes are indexed by a ulong, not a u64, so we can't index them
	 * by sequence number directly: Assume instead that they will all fall
	 * within the range of +-2 billion of the first one we find.
	 */
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

	/* Drop entries we don't need anymore */
	if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
		genradix_for_each_from(&c->journal_entries, iter, _i,
				       journal_entry_radix_idx(c, jlist->last_seq)) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			if (le64_to_cpu(i->j.seq) >= last_seq)
				break;
			journal_replay_free(c, i);
		}
	}

	jlist->last_seq = max(jlist->last_seq, last_seq);

	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
				GFP_KERNEL);
	if (!_i)
		return -ENOMEM;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	dup = *_i;
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(c, dup);
	}

	*_i = i;
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

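/*
 * Nonce for journal entry checksums/encryption: word 0 is zero, words 1 and 2
 * are the entry's 64 bit sequence number, and word 3 is the BCH_NONCE_JOURNAL
 * type tag, which distinguishes journal nonces from nonces used for other
 * metadata types.
 */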
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

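/*
 * journal_entry_err() is how validation errors are reported: on the read side
 * they're emitted as fixable fsck errors, while corrupt metadata found just
 * before a write is fatal. Note that the macro relies on @write, @ret and the
 * fsck_err label existing in the calling context.
 */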
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
		       type, where,
		       (u64 *) k - entry->_data,
		       le16_to_cpu(entry->u64s));
		pr_newline(&buf);
		pr_indent_push(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		pr_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

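/*
 * When journal_validate_key() deletes a bad key, it does so by shrinking the
 * jset_entry and shifting the remaining keys down - so on FSCK_DELETED_KEY we
 * revalidate at the same position instead of advancing:
 */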
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

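/*
 * Validate a jset read from (or about to be written to) disk: checks magic,
 * version, size, checksum type and checksum, and decrypts the payload on the
 * read path. Returns JOURNAL_ENTRY_NONE if the buffer doesn't contain a
 * journal entry at all, JOURNAL_ENTRY_REREAD if the entry extends past what
 * was read in so far, JOURNAL_ENTRY_BAD if it failed validation, or 0 if it's
 * good.
 */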
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
			"%s sector %llu seq %llu: journal checksum bad",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(ret, c,
			"error decrypting journal entry: %i", ret);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
			"invalid journal entry: last_seq > seq (%llu > %llu)",
			le64_to_cpu(jset->last_seq),
			le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

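/*
 * Read all the journal entries in a single bucket: data is read into @buf in
 * bulk, then validated one entry at a time. A successfully validated entry
 * tells us its real size, and therefore where the next one starts; on a
 * checksum error the size field can't be trusted, so we skip ahead to the
 * next block boundary instead.
 */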
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

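/*
 * Closure for reading the journal from one device: reads every journal
 * bucket, then points cur_idx at the bucket holding the newest sequence
 * number and works out how many sectors are still free in it, so that
 * subsequent journal writes resume where the old ones left off.
 */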
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r, **_r;
	struct genradix_iter iter;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	genradix_for_each(&c->journal_entries, iter, _r) {
		r = *_r;

		if (!r)
			continue;

		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

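/*
 * Read the journal from every member device (in parallel, one closure per
 * device), then: find the most recent flush entry and drop newer non-flush
 * entries (they'll be blacklisted), drop entries older than the newest
 * last_seq, flag any gaps in the sequence numbers not covered by blacklists,
 * and mark the replicas each surviving entry was found on.
 */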
int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, **_i, *prev = NULL;
	struct genradix_iter radix_iter;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.last_seq = 0;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	*start_seq = 0;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!*start_seq)
			*start_seq = le64_to_cpu(i->j.seq) + 1;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!*start_seq) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (prev) {
				bch2_journal_ptrs_to_text(&buf1, c, prev);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		prev = i;
		seq++;
	}

	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		i = *_i;
		if (!i || i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

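/*
 * Pick devices to write this journal entry to, in stripe order: a device is
 * only usable if it's RW, has journal buckets, has room in its current
 * bucket, and doesn't already hold a copy; each device added counts for its
 * durability towards the number of replicas wanted.
 */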
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate space in the journal for a write, moving on
 * to the next journal bucket on a device when necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

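/*
 * Completion for a journal write: record which devices the entry actually
 * made it to, update on-disk sequence numbers, then advance
 * reservations.unwritten_idx with a cmpxchg loop so the buffer can be reused
 * - and kick off the next write if one is already closed and waiting.
 */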
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);

			bch2_reset_alloc_cursors(c);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq = seq;

	j->seq_ondisk = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	}

	spin_unlock(&j->lock);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

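/*
 * Submit the actual journal write to each device picked by
 * journal_write_alloc(): flush entries are written REQ_FUA, and also
 * REQ_PREFLUSH when we're not flushing the other devices separately
 * beforehand.
 */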
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

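/*
 * Main journal write path: decide whether this can be a noflush write, append
 * btree roots and superblock fields to the entry, checksum and encrypt it,
 * allocate space on enough devices, and submit - issuing separate flushes
 * first when a flush write spans multiple devices.
 */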
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (bch2_journal_error(j) ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}