bcachefs: Drop unneeded journal pin in bch2_btree_update_start()
[linux-block.git] / fs / bcachefs / journal_io.c

// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "trace.h"

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

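/*
 * Note: when the read_entire_journal option is set, journal_replay_free()
 * only flags the entry via i->ignore and leaves it on the list, so the full
 * journal remains available for inspection; consumers are expected to check
 * i->ignore before using an entry.
 */
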
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct journal_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

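/*
 * A short note on the duplicate handling in journal_entry_add() above: when
 * the same sequence number is seen twice, the copy that passed its checksum
 * wins; two good copies are expected to be bit-for-bit identical (fsck
 * complains otherwise), and a replacement inherits the pointer list already
 * collected for the old copy. A minimal caller sketch (illustrative only):
 *
 *	mutex_lock(&jlist->lock);
 *	ret = journal_entry_add(c, ca, ptr, jlist, j, csum_bad);
 *	mutex_unlock(&jlist->lock);
 *	if (ret && ret != JOURNAL_ENTRY_ADD_OUT_OF_RANGE)
 *		return ret;
 */
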
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
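
/*
 * The nonce above is derived from the 64-bit journal sequence number (split
 * into two __le32 words) plus the BCH_NONCE_JOURNAL type tag, so each journal
 * entry gets a distinct nonce for checksumming/encryption without any extra
 * per-entry state.
 */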

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

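/*
 * Non-errno codes returned by the journal read path: JOURNAL_ENTRY_REREAD
 * means the entry claims to be larger than what has been read so far, so the
 * caller should grow its buffer and reread; JOURNAL_ENTRY_NONE means wrong
 * magic, i.e. not a journal entry at all; JOURNAL_ENTRY_BAD means the entry
 * failed validation (e.g. bad checksum), so its size field can't be trusted
 * and scanning resumes at the next block boundary.
 */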
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

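/*
 * Usage sketch for the error macros above (illustrative only): the caller
 * must have 'write' and 'ret' in scope and provide a fsck_err: label; on
 * READ the condition is reported as a fixable fsck error, on WRITE it's
 * treated as fatal corruption:
 *
 *	if (journal_entry_err_on(!k->k.u64s, c, "k->u64s 0"))
 *		return FSCK_DELETED_KEY;
 */
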
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
				 type, where,
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf.buf);
		printbuf_exit(&buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}

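/*
 * Note the ordering in journal_validate_key(): on read, keys are first
 * converted to current format/endianness (bch2_bkey_compat()) and then
 * validated; on write, validation happens first and the key is converted
 * afterwards, matching the on-disk version being written.
 */
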
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

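/*
 * The size check in journal_entry_data_usage_validate() above is two-step
 * because struct jset_entry_data_usage ends in a replicas entry with a
 * flexible devs[] array: u->r.nr_devs can only be read safely once the fixed
 * part is known to fit, and only then can the full size be checked.
 */
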
static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
	       le64_to_cpu(u->buckets_ec),
	       le64_to_cpu(u->buckets_unavailable));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
							    version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

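/*
 * bch2_jset_entry_ops[] above is built with an x-macro over
 * BCH_JSET_ENTRY_TYPES(), so every known jset entry type automatically gets a
 * validate and a to_text hook; entry types from newer versions fall outside
 * BCH_JSET_ENTRY_NR and are tolerated by the bounds checks in the two
 * dispatchers.
 */
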
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
						  le32_to_cpu(jset->version),
						  JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
			"%s sector %llu seq %llu: journal checksum bad",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(ret, c,
			"error decrypting journal entry: %i", ret);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
			"invalid journal entry: last_seq > seq (%llu > %llu)",
			le64_to_cpu(jset->last_seq),
			le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

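/*
 * Buffer growth is rounded up to a power of two and capped at
 * JOURNAL_ENTRY_SIZE_MAX - the size the journal read bios are prepared to
 * handle - so an oversized entry is reported as -ENOMEM rather than
 * triggering a larger read.
 */
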
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							       vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

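/*
 * journal_read_bucket() above is a sliding-window scan: a chunk is read into
 * buf and entries are consumed one at a time; JOURNAL_ENTRY_REREAD restarts
 * the read at the current offset with a bigger buffer when an entry straddles
 * the end of the window. A failed checksum doesn't abort the scan, since a
 * good copy of the same entry may exist at the next block boundary or on
 * another device.
 */
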
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&buf1, c, p);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

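/*
 * On successful return from bch2_journal_read(), *start_seq is one past the
 * newest sequence number found anywhere in the journal, while *blacklist_seq
 * is one past the newest flush entry; sequence numbers in
 * [*blacklist_seq, *start_seq) belonged to unflushed (JSET_NO_FLUSH) writes
 * and are blacklisted so they're never trusted on a future mount.
 */
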
/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
			});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate space for a journal write, moving on to the
 * next journal bucket if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

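/*
 * Allocation above proceeds in up to three passes: first try devices in
 * stripe order using only the space left in their current journal buckets;
 * if that doesn't reach replicas_want, advance any device with a free bucket
 * to its next bucket and try again; finally, if a metadata/foreground target
 * was restricting the device set, retry across all rw devices.
 */
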
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

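/*
 * journal_buf_realloc() is best-effort: j->buf_size_want is sampled without
 * j->lock, and an allocation failure simply leaves the journal on its old,
 * smaller buffer. Only the pointer/size swap happens under j->lock; after
 * the swap, new_buf/new_size refer to the old buffer, which is then freed.
 */
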
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		j->seq_ondisk = seq;

		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (new.unwritten_idx == new.idx) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	} else if (!journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);

	spin_unlock(&j->lock);
}

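/*
 * The cmpxchg loop in journal_write_done() advances unwritten_idx atomically
 * with respect to concurrent reservations packed into the same 64-bit
 * journal_res_state word. Once it succeeds, either the next unwritten buffer
 * already has no outstanding reservations and its write is kicked off
 * immediately, or the delayed write worker is rearmed based on the current
 * buffer's expiry time.
 */
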
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

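/*
 * Flush semantics in do_journal_write(): a flush write is issued REQ_FUA so
 * the entry itself is durable, and also REQ_PREFLUSH to order it after
 * previously completed data writes - unless bch2_journal_write() already sent
 * a separate flush bio to every rw device (w->separate_flush), in which case
 * only REQ_FUA is needed here.
 */
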
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    (w->noflush ||
	     (!w->must_flush &&
	      (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	      test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
					      le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}