bcachefs: Use x-macros for more enums
fs/bcachefs/journal_io.c
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "trace.h"

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

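/*
 * Mark a replay entry as ignored, and free it unless the
 * read_entire_journal option is keeping the full list around:
 */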
static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

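/*
 * State shared by the per-device journal read closures: the list of entries
 * found so far, protected by @lock, and the first error encountered:
 */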
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct bch_extent_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct bch_extent_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

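/*
 * Journal entries are checksummed/encrypted with a nonce derived from the
 * entry's sequence number plus the BCH_NONCE_JOURNAL tag, so journal nonces
 * don't collide with nonces used for other metadata:
 */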
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

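/*
 * A validation failure found while reading is a fixable fsck error; on the
 * write side it means we're about to write out corrupt metadata, so we log
 * it and mark the filesystem inconsistent instead:
 */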
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

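/*
 * Validate a single bkey in a journal entry: keys that fail validation are
 * dropped from the entry (the remaining keys are shifted down and the freed
 * space nulled out), and FSCK_DELETED_KEY tells the caller not to advance
 * past the key that now occupies this position:
 */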
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: k->u64s 0",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: extends past end of journal entry",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: bad format %u",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version,
			    JSET_BIG_ENDIAN(jset), write,
			    NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: %s\n%s",
				 type, le64_to_cpu(jset->seq),
				 (u64 *) entry - jset->_data,
				 le32_to_cpu(jset->u64s),
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version,
			    JSET_BIG_ENDIAN(jset), write,
			    NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}

static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static int journal_entry_validate_btree_root(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				    "btree root", write);
fsck_err:
	return ret;
}

static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	/* obsolete, don't care: */
	return 0;
}

static int journal_entry_validate_blacklist(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static int journal_entry_validate_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_data_usage(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_clock(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_dev_usage(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u) + sizeof(u->d[0]) * 7; /* Current value of BCH_DATA_NR */
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

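/*
 * Each jset entry type gets a validate hook; the dispatch table below is
 * generated with the BCH_JSET_ENTRY_TYPES() x-macro so it can't fall out of
 * sync with the enum:
 */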
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset,
							    entry, write)
		: 0;
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

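/*
 * Validate a jset as a whole - magic, version, size and checksum; on the
 * read side the encrypted portion is also decrypted in place once the
 * checksum has been verified. Returns 0, one of the JOURNAL_ENTRY_* codes,
 * or a fatal error:
 */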
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

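/*
 * Grow the read buffer to the next power of two, capped at the largest
 * journal entry the read bios are sized for:
 */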
static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

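/*
 * Read all the journal entries in one journal bucket, walking forward from
 * the start of the bucket; short reads grow the buffer and reread, and read
 * errors are non-fatal since the entry may exist on another device:
 */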
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = c->opts.block_size;
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct bch_extent_ptr) {
					.dev = ca->dev_idx,
					.offset	= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset += sectors;
		sectors_read -= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

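/*
 * Per-device read closure: read every journal bucket on this device, then
 * point cur_idx at the bucket with the newest sequence number so that new
 * journal writes continue from there:
 */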
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
				      struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = c->devs[j->ptrs[i].dev];
		u64 offset;

		div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%llu (offset %llu)",
		       j->ptrs[i].dev,
		       (u64) j->ptrs[i].offset, offset);
	}
}

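/*
 * Read the journal from all member devices: entries newer than the most
 * recent flush are dropped (they'll be blacklisted and rewritten), gaps in
 * the sequence numbers are reported, and the replicas entry for each
 * surviving journal entry is marked in the superblock:
 */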
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		return -1;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			char buf1[200], buf2[200];

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct printbuf out = PBUF(buf1);
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&out, c, p);
				pr_buf(&out, " size %llu", vstruct_sectors(&p->j, c->block_bits));
			} else
				sprintf(buf1, "(none)");
			bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1, buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;
		char buf[80];

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
fsck_err:
	return ret;
}

/* journal write: */

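/*
 * Append pointers to @w->key for up to @replicas_want replicas of this
 * journal write, walking @devs_sorted in wear-leveling order and skipping
 * devices that are read-only, already have a pointer, or lack space:
 */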
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate devices for a journal write, moving on to
 * each device's next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be.
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}

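/*
 * Grow the journal buffer to buf_size_want if possible; the copy is done
 * outside j->lock, with only the buffer swap done under it:
 */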
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

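/*
 * Called when every replica of a journal write has completed: record which
 * devices hold the entry, advance the on-disk sequence numbers, bump
 * unwritten_idx, and kick off the next write if one is already waiting:
 */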
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_devs_list devs =
		bch2_bkey_devs(bkey_i_to_s_c(&w->key));
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq, last_seq;
	int err = 0;

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);
	last_seq = le64_to_cpu(w->data->last_seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	j->seq_ondisk = seq;
	if (err && (!j->err_seq || seq < j->err_seq))
		j->err_seq = seq;

	if (!JSET_NO_FLUSH(w->data)) {
		j->flushed_seq_ondisk = seq;
		j->last_seq_ondisk = last_seq;
	}

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_last_unwritten_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

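/*
 * Submit the actual journal write to every device we allocated a replica
 * on; REQ_FUA/REQ_PREFLUSH are used for flush writes, unless a separate
 * preflush has already been issued to every device:
 */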
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
}

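/*
 * Top half of a journal write: decide whether it can be a noflush write,
 * fold in btree roots and superblock fields, compact, checksum and encrypt,
 * allocate space on devices, and issue separate preflushes if needed before
 * handing off to do_journal_write():
 */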
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	char *journal_debug_buf = NULL;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    !w->must_flush &&
	    (jiffies - j->last_flush_write) < msecs_to_jiffies(j->write_delay_ms) &&
	    test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq = 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) <= bcachefs_metadata_version_inode_btree_change)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret) {
		journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
		if (journal_debug_buf)
			__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf);
		kfree(journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	bch2_bucket_seq_cleanup(c);

	continue_at(cl, do_journal_write, system_highpri_wq);
	return;
no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}