// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "trace.h"

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct bch_extent_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct bch_extent_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

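/*
 * The nonce used for journal checksums/encryption is derived from the entry's
 * sequence number, so every jset is checksummed with a distinct nonce:
 */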
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

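/*
 * At read time a bad entry is a fixable fsck error; at write time it means
 * our in-memory state is corrupt, so we mark the filesystem inconsistent
 * instead of writing it out:
 */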
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY 5

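/*
 * A key that fails validation is deleted in place: either the entry is
 * truncated at the bad key, or the following keys are memmove()d down over
 * it; the freed space is nulled out, and FSCK_DELETED_KEY tells the caller
 * not to advance past the key now occupying this position:
 */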
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
				 type, where,
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}

static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static int journal_entry_validate_btree_root(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static int journal_entry_validate_blacklist(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static int journal_entry_validate_usage(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_data_usage(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_clock(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_dev_usage(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_log(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
};

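/*
 * BCH_JSET_ENTRY_TYPES() expands to one validate op per entry type, so
 * bch2_journal_entry_validate() can dispatch on entry->type:
 */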
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
							    version, big_endian, write)
		: 0;
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
					 "journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
						  le32_to_cpu(jset->version),
						  JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

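/*
 * Returns 0 if the jset is valid, JOURNAL_ENTRY_NONE if it doesn't look like
 * a journal entry at all (bad magic), JOURNAL_ENTRY_REREAD if the entry is
 * bigger than what's been read so far, JOURNAL_ENTRY_BAD on checksum or size
 * errors, or EINVAL for a version we can't parse:
 */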
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

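/*
 * Scan a single journal bucket, validating each jset we find and adding it
 * (with a pointer back to this device/offset) to the journal_list:
 */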
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct bch_extent_ptr) {
					.dev = ca->dev_idx,
					.offset = offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there are duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
				      struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%llu (offset %llu)",
		       j->ptrs[i].dev,
		       (u64) j->ptrs[i].offset, offset);
	}
}

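/*
 * Read the journal from every member device, then decide which entries are
 * live: find the newest flush entry, mark anything newer for blacklisting,
 * and drop entries older than that entry's last_seq. On return *start_seq is
 * one past the newest sequence number seen and *blacklist_seq is one past the
 * newest flush entry. An illustrative sketch of the call (the actual call
 * site lives in the recovery code, not here):
 *
 *	u64 blacklist_seq, journal_seq;
 *	LIST_HEAD(journal_entries);
 *
 *	ret = bch2_journal_read(c, &journal_entries,
 *				&blacklist_seq, &journal_seq);
 *	if (ret)
 *		goto err;
 */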
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		return -1;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			char buf1[200], buf2[200];

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct printbuf out = PBUF(buf1);
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&out, c, p);
				pr_buf(&out, " size %llu", vstruct_sectors(&p->j, c->block_bits));
			} else
				sprintf(buf1, "(none)");
			bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1, buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;
		char buf[80];

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
fsck_err:
	return ret;
}

/* journal write: */

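/*
 * Append pointers to @w->key for devices in @devs_sorted order until we've
 * accumulated @replicas_want replicas' worth of durability; *replicas tracks
 * how much we have so far:
 */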
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate space in the journal for a write, moving on
 * to the next journal bucket on a device if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be.
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		j->seq_ondisk = seq;

		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

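	/*
	 * Advance unwritten_idx with a cmpxchg loop: the other fields packed
	 * into j->reservations can be updated concurrently, so we can't just
	 * increment it:
	 */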
	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(c->io_complete_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

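/*
 * Submit the journal write to each device pointer we allocated in w->key;
 * REQ_PREFLUSH is set here unless bch2_journal_write() already issued
 * separate flushes:
 */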
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	char *journal_debug_buf = NULL;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    (w->noflush ||
	     (!w->must_flush &&
	      (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	      test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret) {
		journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
		if (journal_debug_buf)
			__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf);
		kfree(journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (test_bit(JOURNAL_NOCHANGES, &j->flags))
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	bch2_bucket_seq_cleanup(c);

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}