fs/bcachefs/btree_io.c
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

void bch2_btree_node_io_unlock(struct btree *b)
{
        EBUG_ON(!btree_node_write_in_flight(b));

        clear_btree_node_write_in_flight_inner(b);
        clear_btree_node_write_in_flight(b);
        wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
                            TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
        bch2_assert_btree_nodes_not_locked();

        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
                           struct bkey_packed *start,
                           struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        struct bkey_packed *k, *p;

        if (start == end)
                return;

        for (p = start, k = bkey_p_next(start);
             k != end;
             p = k, k = bkey_p_next(k)) {
                struct bkey l = bkey_unpack_key(b, p);
                struct bkey r = bkey_unpack_key(b, k);

                BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
        }
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
        struct bkey_packed *k;

        for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
                              bool used_mempool, void *p)
{
        if (used_mempool)
                mempool_free(p, &c->btree_bounce_pool);
        else
                kvfree(p);
}

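/*
 * Allocate a bounce buffer for sorting/compacting: try a cheap
 * kvmalloc() first (GFP_NOWAIT, no warning on failure), falling back to
 * the preallocated mempool so the allocation can't fail.
 * memalloc_nofs_save() prevents any allocation here from recursing into
 * filesystem reclaim.
 */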
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
                                bool *used_mempool)
{
        unsigned flags = memalloc_nofs_save();
        void *p;

        BUG_ON(size > c->opts.btree_node_size);

        *used_mempool = false;
        p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
        if (!p) {
                *used_mempool = true;
                p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
        }
        memalloc_nofs_restore(flags);
        return p;
}

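/*
 * Bottom-up heapsort over an array of pointers to packed bkeys (same
 * algorithm as lib/sort.c): sorting pointers rather than the
 * variable-size keys themselves means each key only has to be moved
 * once, on the final copy.
 */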
static void sort_bkey_ptrs(const struct btree *bt,
                           struct bkey_packed **ptrs, unsigned nr)
{
        unsigned n = nr, a = nr / 2, b, c, d;

        if (!a)
                return;

        /* Heap sort: see lib/sort.c: */
        while (1) {
                if (a)
                        a--;
                else if (--n)
                        swap(ptrs[0], ptrs[n]);
                else
                        break;

                for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
                        b = bch2_bkey_cmp_packed(bt,
                                            ptrs[c],
                                            ptrs[d]) >= 0 ? c : d;
                if (d == n)
                        b = c;

                while (b != a &&
                       bch2_bkey_cmp_packed(bt,
                                       ptrs[a],
                                       ptrs[b]) >= 0)
                        b = (b - 1) / 2;
                c = b;
                while (b != a) {
                        b = (b - 1) / 2;
                        swap(ptrs[b], ptrs[c]);
                }
        }
}

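/*
 * Sort this node's unwritten whiteouts: an array of pointers to them is
 * built up at the end of a bounce buffer, sorted in place, and then the
 * whiteouts themselves are copied out in sorted order and memcpyed back.
 */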
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
        struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
        bool used_mempool = false;
        size_t bytes = b->whiteout_u64s * sizeof(u64);

        if (!b->whiteout_u64s)
                return;

        new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

        ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

        for (k = unwritten_whiteouts_start(b);
             k != unwritten_whiteouts_end(b);
             k = bkey_p_next(k))
                *--ptrs = k;

        sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

        k = new_whiteouts;

        while (ptrs != ptrs_end) {
                bkey_p_copy(k, *ptrs);
                k = bkey_p_next(k);
                ptrs++;
        }

        verify_no_dups(b, new_whiteouts,
                       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

        memcpy_u64s(unwritten_whiteouts_start(b),
                    new_whiteouts, b->whiteout_u64s);

        btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
                                bool compacting, enum compact_mode mode)
{
        if (!bset_dead_u64s(b, t))
                return false;

        switch (mode) {
        case COMPACT_LAZY:
                return should_compact_bset_lazy(b, t) ||
                        (compacting && !bset_written(b, bset(b, t)));
        case COMPACT_ALL:
                return true;
        default:
                BUG();
        }
}

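/*
 * Drop deleted keys (whiteouts that are no longer needed) from each
 * bset, sliding unwritten bsets down so the node stays densely packed;
 * returns true if any keys were dropped or bsets moved, since that
 * invalidates the auxiliary search trees (rebuilt before returning).
 */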
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
        struct bset_tree *t;
        bool ret = false;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k, *n, *out, *start, *end;
                struct btree_node_entry *src = NULL, *dst = NULL;

                if (t != b->set && !bset_written(b, i)) {
                        src = container_of(i, struct btree_node_entry, keys);
                        dst = max(write_block(b),
                                  (void *) btree_bkey_last(b, t - 1));
                }

                if (src != dst)
                        ret = true;

                if (!should_compact_bset(b, t, ret, mode)) {
                        if (src != dst) {
                                memmove(dst, src, sizeof(*src) +
                                        le16_to_cpu(src->keys.u64s) *
                                        sizeof(u64));
                                i = &dst->keys;
                                set_btree_bset(b, t, i);
                        }
                        continue;
                }

                start   = btree_bkey_first(b, t);
                end     = btree_bkey_last(b, t);

                if (src != dst) {
                        memmove(dst, src, sizeof(*src));
                        i = &dst->keys;
                        set_btree_bset(b, t, i);
                }

                out = i->start;

                for (k = start; k != end; k = n) {
                        n = bkey_p_next(k);

                        if (!bkey_deleted(k)) {
                                bkey_p_copy(out, k);
                                out = bkey_p_next(out);
                        } else {
                                BUG_ON(k->needs_whiteout);
                        }
                }

                i->u64s = cpu_to_le16((u64 *) out - i->_data);
                set_btree_bset_end(b, t);
                bch2_bset_set_no_aux_tree(b, t);
                ret = true;
        }

        bch2_verify_btree_nr_keys(b);

        bch2_btree_build_aux_trees(b);

        return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
                            enum compact_mode mode)
{
        return bch2_drop_whiteouts(b, mode);
}

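/*
 * Sort bsets [start_idx, end_idx) together into a single bset, via a
 * bounce buffer: when sorting the entire node the bounce buffer is node
 * sized and we can simply swap it with the node's buffer; otherwise the
 * result is copied back into the first bset being sorted.
 */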
static void btree_node_sort(struct bch_fs *c, struct btree *b,
                            unsigned start_idx,
                            unsigned end_idx,
                            bool filter_whiteouts)
{
        struct btree_node *out;
        struct sort_iter_stack sort_iter;
        struct bset_tree *t;
        struct bset *start_bset = bset(b, &b->set[start_idx]);
        bool used_mempool = false;
        u64 start_time, seq = 0;
        unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
        bool sorting_entire_node = start_idx == 0 &&
                end_idx == b->nsets;

        sort_iter_stack_init(&sort_iter, b);

        for (t = b->set + start_idx;
             t < b->set + end_idx;
             t++) {
                u64s += le16_to_cpu(bset(b, t)->u64s);
                sort_iter_add(&sort_iter.iter,
                              btree_bkey_first(b, t),
                              btree_bkey_last(b, t));
        }

        bytes = sorting_entire_node
                ? btree_buf_bytes(b)
                : __vstruct_bytes(struct btree_node, u64s);

        out = btree_bounce_alloc(c, bytes, &used_mempool);

        start_time = local_clock();

        u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);

        out->keys.u64s = cpu_to_le16(u64s);

        BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

        if (sorting_entire_node)
                bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                                       start_time);

        /* Make sure we preserve bset journal_seq: */
        for (t = b->set + start_idx; t < b->set + end_idx; t++)
                seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
        start_bset->journal_seq = cpu_to_le64(seq);

        if (sorting_entire_node) {
                u64s = le16_to_cpu(out->keys.u64s);

                BUG_ON(bytes != btree_buf_bytes(b));

                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                *out = *b->data;
                out->keys.u64s = cpu_to_le16(u64s);
                swap(out, b->data);
                set_btree_bset(b, b->set, &b->data->keys);
        } else {
                start_bset->u64s = out->keys.u64s;
                memcpy_u64s(start_bset->start,
                            out->keys.start,
                            le16_to_cpu(out->keys.u64s));
        }

        for (i = start_idx + 1; i < end_idx; i++)
                b->nr.bset_u64s[start_idx] +=
                        b->nr.bset_u64s[i];

        b->nsets -= shift;

        for (i = start_idx + 1; i < b->nsets; i++) {
                b->nr.bset_u64s[i]      = b->nr.bset_u64s[i + shift];
                b->set[i]               = b->set[i + shift];
        }

        for (i = b->nsets; i < MAX_BSETS; i++)
                b->nr.bset_u64s[i] = 0;

        set_btree_bset_end(b, &b->set[start_idx]);
        bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

        btree_bounce_free(c, bytes, used_mempool, out);

        bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
                          struct btree *dst,
                          struct btree *src)
{
        struct btree_nr_keys nr;
        struct btree_node_iter src_iter;
        u64 start_time = local_clock();

        BUG_ON(dst->nsets != 1);

        bch2_bset_set_no_aux_tree(dst, dst->set);

        bch2_btree_node_iter_init_from_start(&src_iter, src);

        nr = bch2_sort_repack(btree_bset_first(dst),
                        src, &src_iter,
                        &dst->format,
                        true);

        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
                               start_time);

        set_btree_bset_end(dst, dst->set);

        dst->nr.live_u64s       += nr.live_u64s;
        dst->nr.bset_u64s[0]    += nr.bset_u64s[0];
        dst->nr.packed_keys     += nr.packed_keys;
        dst->nr.unpacked_keys   += nr.unpacked_keys;

        bch2_verify_btree_nr_keys(dst);
}

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets, sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
        unsigned unwritten_idx;
        bool ret = false;

        for (unwritten_idx = 0;
             unwritten_idx < b->nsets;
             unwritten_idx++)
                if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
                        break;

        if (b->nsets - unwritten_idx > 1) {
                btree_node_sort(c, b, unwritten_idx,
                                b->nsets, false);
                ret = true;
        }

        if (unwritten_idx > 1) {
                btree_node_sort(c, b, 0, unwritten_idx, false);
                ret = true;
        }

        return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
        struct bset_tree *t;

        for_each_bset(b, t)
                bch2_bset_build_aux_tree(b, t,
                                !bset_written(b, bset(b, t)) &&
                                t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
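/*
 * A worked example with illustrative values (not taken from this code):
 * if ilog2(btree_max_u64s(c)) were 15 and BTREE_WRITE_SET_U64s_BITS were
 * 9, mid_u64s_bits below would be (15 + 9) / 2 = 12, so we'd compact
 * once the middle bset grows past 2^12 u64s - the geometric mean of
 * 2^15 and 2^9.
 */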
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
        unsigned mid_u64s_bits =
                (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

        return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * Reinitializes btree iterators via bch2_trans_node_reinit_iter() if we
 * sorted (i.e. invalidated them).
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
        struct bch_fs *c = trans->c;
        struct btree_node_entry *bne;
        bool reinit_iter = false;

        EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
        BUG_ON(bset_written(b, bset(b, &b->set[1])));
        BUG_ON(btree_node_just_written(b));

        if (b->nsets == MAX_BSETS &&
            !btree_node_write_in_flight(b) &&
            should_compact_all(c, b)) {
                bch2_btree_node_write(c, b, SIX_LOCK_write,
                                      BTREE_WRITE_init_next_bset);
                reinit_iter = true;
        }

        if (b->nsets == MAX_BSETS &&
            btree_node_compact(c, b))
                reinit_iter = true;

        BUG_ON(b->nsets >= MAX_BSETS);

        bne = want_new_bset(c, b);
        if (bne)
                bch2_bset_init_next(b, bne);

        bch2_btree_build_aux_trees(b);

        if (reinit_iter)
                bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct bch_dev *ca,
                          struct btree *b, struct bset *i,
                          unsigned offset, int write)
{
        prt_printf(out, bch2_log_msg(c, "%s"),
                   write == READ
                   ? "error validating btree node "
                   : "corrupt btree node before write ");
        if (ca)
                prt_printf(out, "on %s ", ca->name);
        prt_printf(out, "at btree ");
        bch2_btree_pos_to_text(out, c, b);

        prt_printf(out, "\n  node offset %u/%u",
                   b->written, btree_ptr_sectors_written(&b->key));
        if (i)
                prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
        prt_str(out, ": ");
}

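/*
 * Common error path for btree node validation: on the write side any
 * inconsistency is fatal unless errors=continue; on the read side an
 * error is either fixable by fsck, worth retrying from another replica,
 * or escalated to a topology/incompatibility error.
 */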
__printf(9, 10)
static int __btree_err(int ret,
                       struct bch_fs *c,
                       struct bch_dev *ca,
                       struct btree *b,
                       struct bset *i,
                       int write,
                       bool have_retry,
                       enum bch_sb_error_id err_type,
                       const char *fmt, ...)
{
        struct printbuf out = PRINTBUF;
        va_list args;

        btree_err_msg(&out, c, ca, b, i, b->written, write);

        va_start(args, fmt);
        prt_vprintf(&out, fmt, args);
        va_end(args);

        if (write == WRITE) {
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = c->opts.errors == BCH_ON_ERROR_continue
                        ? 0
                        : -BCH_ERR_fsck_errors_not_fixed;
                goto out;
        }

        if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
                ret = -BCH_ERR_btree_node_read_err_fixable;
        if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
                ret = -BCH_ERR_btree_node_read_err_bad_node;

        if (ret != -BCH_ERR_btree_node_read_err_fixable)
                bch2_sb_error_count(c, err_type);

        switch (ret) {
        case -BCH_ERR_btree_node_read_err_fixable:
                ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
                if (ret != -BCH_ERR_fsck_fix &&
                    ret != -BCH_ERR_fsck_ignore)
                        goto fsck_err;
                ret = -BCH_ERR_fsck_fix;
                break;
        case -BCH_ERR_btree_node_read_err_want_retry:
        case -BCH_ERR_btree_node_read_err_must_retry:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                break;
        case -BCH_ERR_btree_node_read_err_bad_node:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = bch2_topology_error(c);
                break;
        case -BCH_ERR_btree_node_read_err_incompatible:
                bch2_print_string_as_lines(KERN_ERR, out.buf);
                ret = -BCH_ERR_fsck_errors_not_fixed;
                break;
        default:
                BUG();
        }
out:
fsck_err:
        printbuf_exit(&out);
        return ret;
}

#define btree_err(type, c, ca, b, i, _err_type, msg, ...)               \
({                                                                      \
        int _ret = __btree_err(type, c, ca, b, i, write, have_retry,    \
                               BCH_FSCK_ERR_##_err_type,                \
                               msg, ##__VA_ARGS__);                     \
                                                                        \
        if (_ret != -BCH_ERR_fsck_fix) {                                \
                ret = _ret;                                             \
                goto fsck_err;                                          \
        }                                                               \
                                                                        \
        *saw_error = true;                                              \
})

#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
        struct bset_tree *t;

        for_each_bset(b, t) {
                struct bset *i = bset(b, t);
                struct bkey_packed *k;

                for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
                                break;

                if (k != i->start) {
                        unsigned shift = (u64 *) k - (u64 *) i->start;

                        memmove_u64s_down(i->start, k,
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
                        set_btree_bset_end(b, t);
                }

                for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
                        if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
                                break;

                if (k != vstruct_last(i)) {
                        i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
                        set_btree_bset_end(b, t);
                }
        }

        /*
         * Always rebuild search trees: eytzinger search tree nodes directly
         * depend on the values of min/max key:
         */
        bch2_bset_set_no_aux_tree(b, b->set);
        bch2_btree_build_aux_trees(b);
        b->nr = bch2_btree_node_count_keys(b);

        struct bkey_s_c k;
        struct bkey unpacked;
        struct btree_node_iter iter;
        for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
                BUG_ON(bpos_lt(k.k->p, b->data->min_key));
                BUG_ON(bpos_gt(k.k->p, b->data->max_key));
        }
}

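/*
 * Validate a single bset's header fields: version compatibility, that
 * the bset fits within the node and is at the right sector offset, and -
 * for the first bset (offset 0) - the btree node header itself: sequence
 * number, btree id, level, min/max keys and the bkey format.
 */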
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                         struct btree *b, struct bset *i,
                         unsigned offset, unsigned sectors,
                         int write, bool have_retry, bool *saw_error)
{
        unsigned version = le16_to_cpu(i->version);
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        int ret = 0;

        btree_err_on(!bch2_version_compatible(version),
                     -BCH_ERR_btree_node_read_err_incompatible,
                     c, ca, b, i,
                     btree_node_unsupported_version,
                     "unsupported bset version %u.%u",
                     BCH_VERSION_MAJOR(version),
                     BCH_VERSION_MINOR(version));

        if (btree_err_on(version < c->sb.version_min,
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, NULL, b, i,
                         btree_node_bset_older_than_sb_min,
                         "bset version %u older than superblock version_min %u",
                         version, c->sb.version_min)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version_min = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        if (btree_err_on(BCH_VERSION_MAJOR(version) >
                         BCH_VERSION_MAJOR(c->sb.version),
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, NULL, b, i,
                         btree_node_bset_newer_than_sb,
                         "bset version %u newer than superblock version %u",
                         version, c->sb.version)) {
                mutex_lock(&c->sb_lock);
                c->disk_sb.sb->version = cpu_to_le16(version);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
                     -BCH_ERR_btree_node_read_err_incompatible,
                     c, ca, b, i,
                     btree_node_unsupported_version,
                     "BSET_SEPARATE_WHITEOUTS no longer supported");

        if (btree_err_on(offset + sectors > btree_sectors(c),
                         -BCH_ERR_btree_node_read_err_fixable,
                         c, ca, b, i,
                         bset_past_end_of_btree_node,
                         "bset past end of btree node")) {
                i->u64s = 0;
                ret = 0;
                goto out;
        }

        btree_err_on(offset && !i->u64s,
                     -BCH_ERR_btree_node_read_err_fixable,
                     c, ca, b, i,
                     bset_empty,
                     "empty bset");

        btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
                     -BCH_ERR_btree_node_read_err_want_retry,
                     c, ca, b, i,
                     bset_wrong_sector_offset,
                     "bset at wrong sector offset");

        if (!offset) {
                struct btree_node *bn =
                        container_of(i, struct btree_node, keys);
                /* These indicate that we read the wrong btree node: */

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        /* XXX endianness */
                        btree_err_on(bp->seq != bn->keys.seq,
                                     -BCH_ERR_btree_node_read_err_must_retry,
                                     c, ca, b, NULL,
                                     bset_bad_seq,
                                     "incorrect sequence number (wrong btree node)");
                }

                btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_btree,
                             "incorrect btree id");

                btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_level,
                             "incorrect level");

                if (!write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                        struct bch_btree_ptr_v2 *bp =
                                &bkey_i_to_btree_ptr_v2(&b->key)->v;

                        if (BTREE_PTR_RANGE_UPDATED(bp)) {
                                b->data->min_key = bp->min_key;
                                b->data->max_key = b->key.k.p;
                        }

                        btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
                                     -BCH_ERR_btree_node_read_err_must_retry,
                                     c, ca, b, NULL,
                                     btree_node_bad_min_key,
                                     "incorrect min_key: got %s should be %s",
                                     (printbuf_reset(&buf1),
                                      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
                                     (printbuf_reset(&buf2),
                                      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
                }

                btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, i,
                             btree_node_bad_max_key,
                             "incorrect max key %s",
                             (printbuf_reset(&buf1),
                              bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

                if (write)
                        compat_btree_node(b->c.level, b->c.btree_id, version,
                                          BSET_BIG_ENDIAN(i), write, bn);

                btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
                             -BCH_ERR_btree_node_read_err_bad_node,
                             c, ca, b, i,
                             btree_node_bad_format,
                             "invalid bkey format: %s\n  %s", buf1.buf,
                             (printbuf_reset(&buf2),
                              bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
                printbuf_reset(&buf1);

                compat_bformat(b->c.level, b->c.btree_id, version,
                               BSET_BIG_ENDIAN(i), write,
                               &bn->format);
        }
out:
fsck_err:
        printbuf_exit(&buf2);
        printbuf_exit(&buf1);
        return ret;
}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
                            struct bkey_s_c k,
                            bool updated_range, int rw,
                            struct printbuf *err)
{
        return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
                (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
                (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}

static bool __bkey_valid(struct bch_fs *c, struct btree *b,
                         struct bset *i, struct bkey_packed *k)
{
        if (bkey_p_next(k) > vstruct_last(i))
                return false;

        if (k->format > KEY_FORMAT_CURRENT)
                return false;

        if (k->u64s < bkeyp_key_u64s(&b->format, k))
                return false;

        struct printbuf buf = PRINTBUF;
        struct bkey tmp;
        struct bkey_s u = __bkey_disassemble(b, k, &tmp);
        /* __bch2_bkey_invalid() returns nonzero for *invalid* keys: invert it */
        bool ret = !__bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
        printbuf_exit(&buf);
        return ret;
}

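/*
 * Validate each key in a bset, in order; bad keys are dropped by
 * memmoving the remainder of the bset down. If a key's own size field is
 * suspect, we scan forward one u64 at a time for the next thing that
 * looks like a valid key instead of throwing away the rest of the bset.
 */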
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                              struct bset *i, int write,
                              bool have_retry, bool *saw_error)
{
        unsigned version = le16_to_cpu(i->version);
        struct bkey_packed *k, *prev = NULL;
        struct printbuf buf = PRINTBUF;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        int ret = 0;

        for (k = i->start;
             k != vstruct_last(i);) {
                struct bkey_s u;
                struct bkey tmp;
                unsigned next_good_key;

                if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_past_bset_end,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
                        break;
                }

                if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_format,
                                 "invalid bkey format %u", k->format))
                        goto drop_this_key;

                if (btree_err_on(k->u64s < bkeyp_key_u64s(&b->format, k),
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_u64s,
                                 "k->u64s too small (%u < %u)", k->u64s, bkeyp_key_u64s(&b->format, k)))
                        goto drop_this_key;

                if (!write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                u = __bkey_disassemble(b, k, &tmp);

                printbuf_reset(&buf);
                if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
                        printbuf_reset(&buf);
                        bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);

                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                  c, NULL, b, i,
                                  btree_node_bad_bkey,
                                  "invalid bkey: %s", buf.buf);
                        goto drop_this_key;
                }

                if (write)
                        bch2_bkey_compat(b->c.level, b->c.btree_id, version,
                                    BSET_BIG_ENDIAN(i), write,
                                    &b->format, k);

                if (prev && bkey_iter_cmp(b, prev, k) > 0) {
                        struct bkey up = bkey_unpack_key(b, prev);

                        printbuf_reset(&buf);
                        prt_printf(&buf, "keys out of order: ");
                        bch2_bkey_to_text(&buf, &up);
                        prt_printf(&buf, " > ");
                        bch2_bkey_to_text(&buf, u.k);

                        if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                      c, NULL, b, i,
                                      btree_node_bkey_out_of_order,
                                      "%s", buf.buf))
                                goto drop_this_key;
                }

                prev = k;
                k = bkey_p_next(k);
                continue;
drop_this_key:
                next_good_key = k->u64s;

                if (!next_good_key ||
                    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
                     version >= bcachefs_metadata_version_snapshot)) {
                        /*
                         * only do scanning if bch2_bkey_compat() has nothing to
                         * do
                         */

                        if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
                                for (next_good_key = 1;
                                     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
                                     next_good_key++)
                                        if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
                                                goto got_good_key;
                        }

                        /*
                         * didn't find a good key, have to truncate the rest of
                         * the bset
                         */
                        next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
                }
got_good_key:
                le16_add_cpu(&i->u64s, -next_good_key);
                memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
        }
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

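/*
 * Read completion: validate and decrypt each bset in the node, merge
 * them with bch2_key_sort_fix_overlapping(), then make a final pass
 * dropping any keys whose values fail validation.
 *
 * Returns nonzero if the read should be retried (from another replica,
 * if one exists).
 */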
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                              struct btree *b, bool have_retry, bool *saw_error)
{
        struct btree_node_entry *bne;
        struct sort_iter *iter;
        struct btree_node *sorted;
        struct bkey_packed *k;
        struct bset *i;
        bool used_mempool, blacklisted;
        bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
                BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
        unsigned u64s;
        unsigned ptr_written = btree_ptr_sectors_written(&b->key);
        struct printbuf buf = PRINTBUF;
        int ret = 0, retry_read = 0, write = READ;
        u64 start_time = local_clock();

        b->version_ondisk = U16_MAX;
        /* We might get called multiple times on read retry: */
        b->written = 0;

        iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
        sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

        if (bch2_meta_read_fault("btree"))
                btree_err(-BCH_ERR_btree_node_read_err_must_retry,
                          c, ca, b, NULL,
                          btree_node_fault_injected,
                          "dynamic fault");

        btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
                     -BCH_ERR_btree_node_read_err_must_retry,
                     c, ca, b, NULL,
                     btree_node_bad_magic,
                     "bad magic: want %llx, got %llx",
                     bset_magic(c), le64_to_cpu(b->data->magic));

        if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                struct bch_btree_ptr_v2 *bp =
                        &bkey_i_to_btree_ptr_v2(&b->key)->v;

                bch2_bpos_to_text(&buf, b->data->min_key);
                prt_str(&buf, "-");
                bch2_bpos_to_text(&buf, b->data->max_key);

                btree_err_on(b->data->keys.seq != bp->seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, NULL,
                             btree_node_bad_seq,
                             "got wrong btree node (want %llx got %llx)\n"
                             "got btree %s level %llu pos %s",
                             bp->seq, b->data->keys.seq,
                             bch2_btree_id_str(BTREE_NODE_ID(b->data)),
                             BTREE_NODE_LEVEL(b->data),
                             buf.buf);
        } else {
                btree_err_on(!b->data->keys.seq,
                             -BCH_ERR_btree_node_read_err_must_retry,
                             c, ca, b, NULL,
                             btree_node_bad_seq,
                             "bad btree header: seq 0");
        }

        while (b->written < (ptr_written ?: btree_sectors(c))) {
                unsigned sectors;
                struct nonce nonce;
                bool first = !b->written;
                bool csum_bad;

                if (!b->written) {
                        i = &b->data->keys;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);

                        struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
                        csum_bad = bch2_crc_cmp(b->data->csum, csum);
                        if (csum_bad)
                                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
                                      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
                                      buf.buf));

                        ret = bset_encrypt(c, i, b->written << 9);
                        if (bch2_fs_fatal_err_on(ret, c,
                                        "decrypting btree node: %s", bch2_err_str(ret)))
                                goto fsck_err;

                        btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
                                     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
                                     -BCH_ERR_btree_node_read_err_incompatible,
                                     c, NULL, b, NULL,
                                     btree_node_unsupported_version,
                                     "btree node does not have NEW_EXTENT_OVERWRITE set");

                        sectors = vstruct_sectors(b->data, c->block_bits);
                } else {
                        bne = write_block(b);
                        i = &bne->keys;

                        if (i->seq != b->data->keys.seq)
                                break;

                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_unknown_csum,
                                     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

                        nonce = btree_nonce(i, b->written << 9);
                        struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
                        csum_bad = bch2_crc_cmp(bne->csum, csum);
                        if (csum_bad)
                                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

                        btree_err_on(csum_bad,
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, i,
                                     bset_bad_csum,
                                     "%s",
                                     (printbuf_reset(&buf),
                                      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
                                      buf.buf));

                        ret = bset_encrypt(c, i, b->written << 9);
                        if (bch2_fs_fatal_err_on(ret, c,
                                        "decrypting btree node: %s", bch2_err_str(ret)))
                                goto fsck_err;

                        sectors = vstruct_sectors(bne, c->block_bits);
                }

                b->version_ondisk = min(b->version_ondisk,
                                        le16_to_cpu(i->version));

                ret = validate_bset(c, ca, b, i, b->written, sectors,
                                    READ, have_retry, saw_error);
                if (ret)
                        goto fsck_err;

                if (!b->written)
                        btree_node_set_format(b, b->data->format);

                ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
                if (ret)
                        goto fsck_err;

                SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

                blacklisted = bch2_journal_seq_is_blacklisted(c,
                                        le64_to_cpu(i->journal_seq),
                                        true);

                btree_err_on(blacklisted && first,
                             -BCH_ERR_btree_node_read_err_fixable,
                             c, ca, b, i,
                             bset_blacklisted_journal_seq,
                             "first btree node bset has blacklisted journal seq (%llu)",
                             le64_to_cpu(i->journal_seq));

                btree_err_on(blacklisted && ptr_written,
                             -BCH_ERR_btree_node_read_err_fixable,
                             c, ca, b, i,
                             first_bset_blacklisted_journal_seq,
                             "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
                             le64_to_cpu(i->journal_seq),
                             b->written, b->written + sectors, ptr_written);

                b->written += sectors;

                if (blacklisted && !first)
                        continue;

                sort_iter_add(iter,
                              vstruct_idx(i, 0),
                              vstruct_last(i));
        }

        if (ptr_written) {
                btree_err_on(b->written < ptr_written,
                             -BCH_ERR_btree_node_read_err_want_retry,
                             c, ca, b, NULL,
                             btree_node_data_missing,
                             "btree node data missing: expected %u sectors, found %u",
                             ptr_written, b->written);
        } else {
                for (bne = write_block(b);
                     bset_byte_offset(b, bne) < btree_buf_bytes(b);
                     bne = (void *) bne + block_bytes(c))
                        btree_err_on(bne->keys.seq == b->data->keys.seq &&
                                     !bch2_journal_seq_is_blacklisted(c,
                                                                      le64_to_cpu(bne->keys.journal_seq),
                                                                      true),
                                     -BCH_ERR_btree_node_read_err_want_retry,
                                     c, ca, b, NULL,
                                     btree_node_bset_after_end,
                                     "found bset signature after last bset");
        }

        sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
        sorted->keys.u64s = 0;

        set_btree_bset(b, b->set, &b->data->keys);

        b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

        u64s = le16_to_cpu(sorted->keys.u64s);
        *sorted = *b->data;
        sorted->keys.u64s = cpu_to_le16(u64s);
        swap(sorted, b->data);
        set_btree_bset(b, b->set, &b->data->keys);
        b->nsets = 1;

        BUG_ON(b->nr.live_u64s != u64s);

        btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

        if (updated_range)
                bch2_btree_node_drop_keys_outside_node(b);

        i = &b->data->keys;
        for (k = i->start; k != vstruct_last(i);) {
                struct bkey tmp;
                struct bkey_s u = __bkey_disassemble(b, k, &tmp);

                printbuf_reset(&buf);

                if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
                    (bch2_inject_invalid_keys &&
                     !bversion_cmp(u.k->version, MAX_VERSION))) {
                        printbuf_reset(&buf);

                        prt_printf(&buf, "invalid bkey: ");
                        bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);

                        btree_err(-BCH_ERR_btree_node_read_err_fixable,
                                  c, NULL, b, i,
                                  btree_node_bad_bkey,
                                  "%s", buf.buf);

                        btree_keys_account_key_drop(&b->nr, 0, k);

                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_p_next(k),
                                          (u64 *) vstruct_end(i) - (u64 *) k);
                        set_btree_bset_end(b, b->set);
                        continue;
                }

                if (u.k->type == KEY_TYPE_btree_ptr_v2) {
                        struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

                        bp.v->mem_ptr = 0;
                }

                k = bkey_p_next(k);
        }

        bch2_bset_build_aux_tree(b, b->set, false);

        set_needs_whiteout(btree_bset_first(b), true);

        btree_node_reset_sib_u64s(b);

        bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
                struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);

                if (ca2->mi.state != BCH_MEMBER_STATE_rw)
                        set_btree_node_need_rewrite(b);
        }

        if (!ptr_written)
                set_btree_node_need_rewrite(b);
out:
        mempool_free(iter, &c->fill_iter);
        printbuf_exit(&buf);
        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
        return retry_read;
fsck_err:
        if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
            ret == -BCH_ERR_btree_node_read_err_must_retry) {
                retry_read = 1;
        } else {
                set_btree_node_read_error(b);
                bch2_btree_lost_data(c, b->c.btree_id);
        }
        goto out;
}

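/*
 * Work item for btree node reads: the initial goto skips the
 * resubmit-and-wait step, so the loop first checks the result of the bio
 * that was already submitted, then - if validation failed - retries from
 * other replicas (via bch2_bkey_pick_read_device()) until a read
 * validates or we run out of devices to try.
 */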
1276 static void btree_node_read_work(struct work_struct *work)
1277 {
1278         struct btree_read_bio *rb =
1279                 container_of(work, struct btree_read_bio, work);
1280         struct bch_fs *c        = rb->c;
1281         struct btree *b         = rb->b;
1282         struct bch_dev *ca      = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1283         struct bio *bio         = &rb->bio;
1284         struct bch_io_failures failed = { .nr = 0 };
1285         struct printbuf buf = PRINTBUF;
1286         bool saw_error = false;
1287         bool retry = false;
1288         bool can_retry;
1289
1290         goto start;
1291         while (1) {
1292                 retry = true;
1293                 bch_info(c, "retrying read");
1294                 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1295                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1296                 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1297                 bio->bi_iter.bi_sector  = rb->pick.ptr.offset;
1298                 bio->bi_iter.bi_size    = btree_buf_bytes(b);
1299
1300                 if (rb->have_ioref) {
1301                         bio_set_dev(bio, ca->disk_sb.bdev);
1302                         submit_bio_wait(bio);
1303                 } else {
1304                         bio->bi_status = BLK_STS_REMOVED;
1305                 }
1306 start:
1307                 printbuf_reset(&buf);
1308                 bch2_btree_pos_to_text(&buf, c, b);
1309                 bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
1310                                    "btree read error %s for %s",
1311                                    bch2_blk_status_to_str(bio->bi_status), buf.buf);
1312                 if (rb->have_ioref)
1313                         percpu_ref_put(&ca->io_ref);
1314                 rb->have_ioref = false;
1315
1316                 bch2_mark_io_failure(&failed, &rb->pick);
1317
1318                 can_retry = bch2_bkey_pick_read_device(c,
1319                                 bkey_i_to_s_c(&b->key),
1320                                 &failed, &rb->pick) > 0;
1321
1322                 if (!bio->bi_status &&
1323                     !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1324                         if (retry)
1325                                 bch_info(c, "retry success");
1326                         break;
1327                 }
1328
1329                 saw_error = true;
1330
1331                 if (!can_retry) {
1332                         set_btree_node_read_error(b);
1333                         bch2_btree_lost_data(c, b->c.btree_id);
1334                         break;
1335                 }
1336         }
1337
1338         bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1339                                rb->start_time);
1340         bio_put(&rb->bio);
1341
1342         if (saw_error && !btree_node_read_error(b)) {
1343                 printbuf_reset(&buf);
1344                 bch2_bpos_to_text(&buf, b->key.k.p);
1345                 bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1346                          __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
1347
1348                 bch2_btree_node_rewrite_async(c, b);
1349         }
1350
1351         printbuf_exit(&buf);
1352         clear_btree_node_read_in_flight(b);
1353         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1354 }
1355
1356 static void btree_node_read_endio(struct bio *bio)
1357 {
1358         struct btree_read_bio *rb =
1359                 container_of(bio, struct btree_read_bio, bio);
1360         struct bch_fs *c        = rb->c;
1361
1362         if (rb->have_ioref) {
1363                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1364
1365                 bch2_latency_acct(ca, rb->start_time, READ);
1366         }
1367
1368         queue_work(c->io_complete_wq, &rb->work);
1369 }
1370
1371 struct btree_node_read_all {
1372         struct closure          cl;
1373         struct bch_fs           *c;
1374         struct btree            *b;
1375         unsigned                nr;
1376         void                    *buf[BCH_REPLICAS_MAX];
1377         struct bio              *bio[BCH_REPLICAS_MAX];
1378         blk_status_t            err[BCH_REPLICAS_MAX];
1379 };
1380
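/*
 * Walk a node's bsets in order, following the shared keys.seq, to count how
 * many sectors were actually written; stops at the first entry whose
 * sequence number doesn't match (leftover data from an older node at the
 * same location):
 */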
1381 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1382 {
1383         struct btree_node *bn = data;
1384         struct btree_node_entry *bne;
1385         unsigned offset = 0;
1386
1387         if (le64_to_cpu(bn->magic) != bset_magic(c))
1388                 return 0;
1389
1390         while (offset < btree_sectors(c)) {
1391                 if (!offset) {
1392                         offset += vstruct_sectors(bn, c->block_bits);
1393                 } else {
1394                         bne = data + (offset << 9);
1395                         if (bne->keys.seq != bn->keys.seq)
1396                                 break;
1397                         offset += vstruct_sectors(bne, c->block_bits);
1398                 }
1399         }
1400
1401         return offset;
1402 }
1403
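/*
 * Check for bset signatures past the written region - i.e. the replica
 * contains more bsets than its sectors-written count accounts for:
 */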
1404 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1405 {
1406         struct btree_node *bn = data;
1407         struct btree_node_entry *bne;
1408
1409         if (!offset)
1410                 return false;
1411
1412         while (offset < btree_sectors(c)) {
1413                 bne = data + (offset << 9);
1414                 if (bne->keys.seq == bn->keys.seq)
1415                         return true;
1416                 offset++;
1417         }
1418
1419         return false;
1421 }
1422
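/*
 * Called once reads from every replica have completed: pick the best copy
 * (the one with the most sectors written), flagging mismatched replicas as
 * fixable fsck errors:
 */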
1423 static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
1424 {
1425         closure_type(ra, struct btree_node_read_all, cl);
1426         struct bch_fs *c = ra->c;
1427         struct btree *b = ra->b;
1428         struct printbuf buf = PRINTBUF;
1429         bool dump_bset_maps = false;
1430         bool have_retry = false;
1431         int ret = 0, best = -1, write = READ;
1432         unsigned i, written = 0, written2 = 0;
1433         __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1434                 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1435         bool _saw_error = false, *saw_error = &_saw_error;
1436
1437         for (i = 0; i < ra->nr; i++) {
1438                 struct btree_node *bn = ra->buf[i];
1439
1440                 if (ra->err[i])
1441                         continue;
1442
1443                 if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1444                     (seq && seq != bn->keys.seq))
1445                         continue;
1446
1447                 if (best < 0) {
1448                         best = i;
1449                         written = btree_node_sectors_written(c, bn);
1450                         continue;
1451                 }
1452
1453                 written2 = btree_node_sectors_written(c, ra->buf[i]);
1454                 if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
1455                                  c, NULL, b, NULL,
1456                                  btree_node_replicas_sectors_written_mismatch,
1457                                  "btree node sectors written mismatch: %u != %u",
1458                                  written, written2) ||
1459                     btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1460                                  -BCH_ERR_btree_node_read_err_fixable,
1461                                  c, NULL, b, NULL,
1462                                  btree_node_bset_after_end,
1463                                  "found bset signature after last bset") ||
1464                     btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1465                                  -BCH_ERR_btree_node_read_err_fixable,
1466                                  c, NULL, b, NULL,
1467                                  btree_node_replicas_data_mismatch,
1468                                  "btree node replicas content mismatch"))
1469                         dump_bset_maps = true;
1470
1471                 if (written2 > written) {
1472                         written = written2;
1473                         best = i;
1474                 }
1475         }
1476 fsck_err:
1477         if (dump_bset_maps) {
1478                 for (i = 0; i < ra->nr; i++) {
1479                         struct btree_node *bn = ra->buf[i];
1480                         struct btree_node_entry *bne = NULL;
1481                         unsigned offset = 0, sectors;
1482                         bool gap = false;
1483
1484                         if (ra->err[i])
1485                                 continue;
1486
1487                         printbuf_reset(&buf);
1488
1489                         while (offset < btree_sectors(c)) {
1490                                 if (!offset) {
1491                                         sectors = vstruct_sectors(bn, c->block_bits);
1492                                 } else {
1493                                         bne = ra->buf[i] + (offset << 9);
1494                                         if (bne->keys.seq != bn->keys.seq)
1495                                                 break;
1496                                         sectors = vstruct_sectors(bne, c->block_bits);
1497                                 }
1498
1499                                 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1500                                 if (bne && bch2_journal_seq_is_blacklisted(c,
1501                                                         le64_to_cpu(bne->keys.journal_seq), false))
1502                                         prt_printf(&buf, "*");
1503                                 offset += sectors;
1504                         }
1505
1506                         while (offset < btree_sectors(c)) {
1507                                 bne = ra->buf[i] + (offset << 9);
1508                                 if (bne->keys.seq == bn->keys.seq) {
1509                                         if (!gap)
1510                                                 prt_printf(&buf, " GAP");
1511                                         gap = true;
1512
1513                                         sectors = vstruct_sectors(bne, c->block_bits);
1514                                         prt_printf(&buf, " %u-%u", offset, offset + sectors);
1515                                         if (bch2_journal_seq_is_blacklisted(c,
1516                                                         le64_to_cpu(bne->keys.journal_seq), false))
1517                                                 prt_printf(&buf, "*");
1518                                 }
1519                                 offset++;
1520                         }
1521
1522                         bch_err(c, "replica %u:%s", i, buf.buf);
1523                 }
1524         }
1525
1526         if (best >= 0) {
1527                 memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
1528                 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1529         } else {
1530                 ret = -1;
1531         }
1532
1533         if (ret) {
1534                 set_btree_node_read_error(b);
1535                 bch2_btree_lost_data(c, b->c.btree_id);
1536         } else if (*saw_error)
1537                 bch2_btree_node_rewrite_async(c, b);
1538
1539         for (i = 0; i < ra->nr; i++) {
1540                 mempool_free(ra->buf[i], &c->btree_bounce_pool);
1541                 bio_put(ra->bio[i]);
1542         }
1543
1544         closure_debug_destroy(&ra->cl);
1545         kfree(ra);
1546         printbuf_exit(&buf);
1547
1548         clear_btree_node_read_in_flight(b);
1549         wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1550 }
1551
1552 static void btree_node_read_all_replicas_endio(struct bio *bio)
1553 {
1554         struct btree_read_bio *rb =
1555                 container_of(bio, struct btree_read_bio, bio);
1556         struct bch_fs *c        = rb->c;
1557         struct btree_node_read_all *ra = rb->ra;
1558
1559         if (rb->have_ioref) {
1560                 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1561
1562                 bch2_latency_acct(ca, rb->start_time, READ);
1563         }
1564
1565         ra->err[rb->idx] = bio->bi_status;
1566         closure_put(&ra->cl);
1567 }
1568
1569 /*
1570  * XXX This allocates multiple times from the same mempools, and can deadlock
1571  * under sufficient memory pressure (but is only a debug path)
1572  */
1573 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1574 {
1575         struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1576         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1577         const union bch_extent_entry *entry;
1578         struct extent_ptr_decoded pick;
1579         struct btree_node_read_all *ra;
1580         unsigned i;
1581
1582         ra = kzalloc(sizeof(*ra), GFP_NOFS);
1583         if (!ra)
1584                 return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1585
1586         closure_init(&ra->cl, NULL);
1587         ra->c   = c;
1588         ra->b   = b;
1589         ra->nr  = bch2_bkey_nr_ptrs(k);
1590
1591         for (i = 0; i < ra->nr; i++) {
1592                 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1593                 ra->bio[i] = bio_alloc_bioset(NULL,
1594                                               buf_pages(ra->buf[i], btree_buf_bytes(b)),
1595                                               REQ_OP_READ|REQ_SYNC|REQ_META,
1596                                               GFP_NOFS,
1597                                               &c->btree_bio);
1598         }
1599
1600         i = 0;
1601         bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1602                 struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1603                 struct btree_read_bio *rb =
1604                         container_of(ra->bio[i], struct btree_read_bio, bio);
1605                 rb->c                   = c;
1606                 rb->b                   = b;
1607                 rb->ra                  = ra;
1608                 rb->start_time          = local_clock();
1609                 rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1610                 rb->idx                 = i;
1611                 rb->pick                = pick;
1612                 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1613                 rb->bio.bi_end_io       = btree_node_read_all_replicas_endio;
1614                 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
1615
1616                 if (rb->have_ioref) {
1617                         this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1618                                      bio_sectors(&rb->bio));
1619                         bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1620
1621                         closure_get(&ra->cl);
1622                         submit_bio(&rb->bio);
1623                 } else {
1624                         ra->err[i] = BLK_STS_REMOVED;
1625                 }
1626
1627                 i++;
1628         }
1629
1630         if (sync) {
1631                 closure_sync(&ra->cl);
1632                 btree_node_read_all_replicas_done(&ra->cl.work);
1633         } else {
1634                 continue_at(&ra->cl, btree_node_read_all_replicas_done,
1635                             c->io_complete_wq);
1636         }
1637
1638         return 0;
1639 }
1640
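/*
 * Read a btree node: pick the best replica, submit the read, and either
 * wait for completion (sync) or finish up in the endio path:
 */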
1641 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
1642                           bool sync)
1643 {
1644         struct bch_fs *c = trans->c;
1645         struct extent_ptr_decoded pick;
1646         struct btree_read_bio *rb;
1647         struct bch_dev *ca;
1648         struct bio *bio;
1649         int ret;
1650
1651         trace_and_count(c, btree_node_read, trans, b);
1652
1653         if (bch2_verify_all_btree_replicas &&
1654             !btree_node_read_all_replicas(c, b, sync))
1655                 return;
1656
1657         ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1658                                          NULL, &pick);
1659
1660         if (ret <= 0) {
1661                 struct printbuf buf = PRINTBUF;
1662
1663                 prt_str(&buf, "btree node read error: no device to read from\n at ");
1664                 bch2_btree_pos_to_text(&buf, c, b);
1665                 bch_err_ratelimited(c, "%s", buf.buf);
1666
1667                 if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1668                     c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1669                         bch2_fatal_error(c);
1670
1671                 set_btree_node_read_error(b);
1672                 bch2_btree_lost_data(c, b->c.btree_id);
1673                 clear_btree_node_read_in_flight(b);
1674                 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1675                 printbuf_exit(&buf);
1676                 return;
1677         }
1678
1679         ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1680
1681         bio = bio_alloc_bioset(NULL,
1682                                buf_pages(b->data, btree_buf_bytes(b)),
1683                                REQ_OP_READ|REQ_SYNC|REQ_META,
1684                                GFP_NOFS,
1685                                &c->btree_bio);
1686         rb = container_of(bio, struct btree_read_bio, bio);
1687         rb->c                   = c;
1688         rb->b                   = b;
1689         rb->ra                  = NULL;
1690         rb->start_time          = local_clock();
1691         rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
1692         rb->pick                = pick;
1693         INIT_WORK(&rb->work, btree_node_read_work);
1694         bio->bi_iter.bi_sector  = pick.ptr.offset;
1695         bio->bi_end_io          = btree_node_read_endio;
1696         bch2_bio_map(bio, b->data, btree_buf_bytes(b));
1697
1698         if (rb->have_ioref) {
1699                 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1700                              bio_sectors(bio));
1701                 bio_set_dev(bio, ca->disk_sb.bdev);
1702
1703                 if (sync) {
1704                         submit_bio_wait(bio);
1705                         bch2_latency_acct(ca, rb->start_time, READ);
1706                         btree_node_read_work(&rb->work);
1707                 } else {
1708                         submit_bio(bio);
1709                 }
1710         } else {
1711                 bio->bi_status = BLK_STS_REMOVED;
1712
1713                 if (sync)
1714                         btree_node_read_work(&rb->work);
1715                 else
1716                         queue_work(c->io_complete_wq, &rb->work);
1717         }
1718 }
1719
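/*
 * Reading a btree root: there's no parent node to read it through, so
 * allocate a cache slot by hand and do the read synchronously:
 */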
1720 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1721                                   const struct bkey_i *k, unsigned level)
1722 {
1723         struct bch_fs *c = trans->c;
1724         struct closure cl;
1725         struct btree *b;
1726         int ret;
1727
1728         closure_init_stack(&cl);
1729
1730         do {
1731                 ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
1732                 closure_sync(&cl);
1733         } while (ret);
1734
1735         b = bch2_btree_node_mem_alloc(trans, level != 0);
1736         bch2_btree_cache_cannibalize_unlock(trans);
1737
1738         BUG_ON(IS_ERR(b));
1739
1740         bkey_copy(&b->key, k);
1741         BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1742
1743         set_btree_node_read_in_flight(b);
1744
1745         bch2_btree_node_read(trans, b, true);
1746
1747         if (btree_node_read_error(b)) {
1748                 bch2_btree_node_hash_remove(&c->btree_cache, b);
1749
1750                 mutex_lock(&c->btree_cache.lock);
1751                 list_move(&b->list, &c->btree_cache.freeable);
1752                 mutex_unlock(&c->btree_cache.lock);
1753
1754                 ret = -BCH_ERR_btree_node_read_error;
1755                 goto err;
1756         }
1757
1758         bch2_btree_set_root_for_read(c, b);
1759 err:
1760         six_unlock_write(&b->c.lock);
1761         six_unlock_intent(&b->c.lock);
1762
1763         return ret;
1764 }
1765
1766 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1767                         const struct bkey_i *k, unsigned level)
1768 {
1769         return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
1770 }
1771
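/*
 * b->will_make_reachable packs a btree_update pointer with the low bit used
 * as a flag: if it's set, the pending update holds a closure ref on this
 * write, which we drop here:
 */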
1772 static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1773                                       struct btree_write *w)
1774 {
1775         unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1776
1777         do {
1778                 old = new = v;
1779                 if (!(old & 1))
1780                         break;
1781
1782                 new &= ~1UL;
1783         } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1784
1785         if (old & 1)
1786                 closure_put(&((struct btree_update *) new)->cl);
1787
1788         bch2_journal_pin_drop(&c->journal, &w->journal);
1789 }
1790
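/*
 * Write completion: if the node was redirtied and needs another write (and
 * nothing blocks one), leave write_in_flight set and start the next write
 * immediately, carrying over the write type; otherwise clear the in-flight
 * bits and wake up waiters:
 */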
1791 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1792 {
1793         struct btree_write *w = btree_prev_write(b);
1794         unsigned long old, new, v;
1795         unsigned type = 0;
1796
1797         bch2_btree_complete_write(c, b, w);
1798
1799         v = READ_ONCE(b->flags);
1800         do {
1801                 old = new = v;
1802
1803                 if ((old & (1U << BTREE_NODE_dirty)) &&
1804                     (old & (1U << BTREE_NODE_need_write)) &&
1805                     !(old & (1U << BTREE_NODE_never_write)) &&
1806                     !(old & (1U << BTREE_NODE_write_blocked)) &&
1807                     !(old & (1U << BTREE_NODE_will_make_reachable))) {
1808                         new &= ~(1U << BTREE_NODE_dirty);
1809                         new &= ~(1U << BTREE_NODE_need_write);
1810                         new |=  (1U << BTREE_NODE_write_in_flight);
1811                         new |=  (1U << BTREE_NODE_write_in_flight_inner);
1812                         new |=  (1U << BTREE_NODE_just_written);
1813                         new ^=  (1U << BTREE_NODE_write_idx);
1814
1815                         type = new & BTREE_WRITE_TYPE_MASK;
1816                         new &= ~BTREE_WRITE_TYPE_MASK;
1817                 } else {
1818                         new &= ~(1U << BTREE_NODE_write_in_flight);
1819                         new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1820                 }
1821         } while ((v = cmpxchg(&b->flags, old, new)) != old);
1822
1823         if (new & (1U << BTREE_NODE_write_in_flight))
1824                 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1825         else
1826                 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
1827 }
1828
1829 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1830 {
1831         struct btree_trans *trans = bch2_trans_get(c);
1832
1833         btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
1834         __btree_node_write_done(c, b);
1835         six_unlock_read(&b->c.lock);
1836
1837         bch2_trans_put(trans);
1838 }
1839
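/*
 * Process context completion work for a btree node write: drop pointers to
 * devices the write failed on and - unless this was the node's first write,
 * i.e. it isn't reachable yet - update the node's key to match:
 */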
1840 static void btree_node_write_work(struct work_struct *work)
1841 {
1842         struct btree_write_bio *wbio =
1843                 container_of(work, struct btree_write_bio, work);
1844         struct bch_fs *c        = wbio->wbio.c;
1845         struct btree *b         = wbio->wbio.bio.bi_private;
1846         struct bch_extent_ptr *ptr;
1847         int ret = 0;
1848
1849         btree_bounce_free(c,
1850                 wbio->data_bytes,
1851                 wbio->wbio.used_mempool,
1852                 wbio->data);
1853
1854         bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1855                 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1856
1857         if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
1858                 ret = -BCH_ERR_btree_node_write_all_failed;
1859                 goto err;
1860         }
1861
1862         if (wbio->wbio.first_btree_write) {
1863                 if (wbio->wbio.failed.nr) {
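                        /* XXX: empty - failures on a node's first write aren't handled here */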
1864
1865                 }
1866         } else {
1867                 ret = bch2_trans_do(c, NULL, NULL, 0,
1868                         bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
1869                                         BCH_WATERMARK_interior_updates|
1870                                         BCH_TRANS_COMMIT_journal_reclaim|
1871                                         BCH_TRANS_COMMIT_no_enospc|
1872                                         BCH_TRANS_COMMIT_no_check_rw,
1873                                         !wbio->wbio.failed.nr));
1874                 if (ret)
1875                         goto err;
1876         }
1877 out:
1878         bio_put(&wbio->wbio.bio);
1879         btree_node_write_done(c, b);
1880         return;
1881 err:
1882         set_btree_node_noevict(b);
1883         bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
1884                              "writing btree node: %s", bch2_err_str(ret));
1885         goto out;
1886 }
1887
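/*
 * Endio for (possibly split) btree node write bios: errors are recorded
 * against the original bio's failed-device list; split bios complete their
 * parent, and only the original bio's completion queues the
 * process-context work:
 */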
1888 static void btree_node_write_endio(struct bio *bio)
1889 {
1890         struct bch_write_bio *wbio      = to_wbio(bio);
1891         struct bch_write_bio *parent    = wbio->split ? wbio->parent : NULL;
1892         struct bch_write_bio *orig      = parent ?: wbio;
1893         struct btree_write_bio *wb      = container_of(orig, struct btree_write_bio, wbio);
1894         struct bch_fs *c                = wbio->c;
1895         struct btree *b                 = wbio->bio.bi_private;
1896         struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
1897         unsigned long flags;
1898
1899         if (wbio->have_ioref)
1900                 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1901
1902         if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1903                                "btree write error: %s",
1904                                bch2_blk_status_to_str(bio->bi_status)) ||
1905             bch2_meta_write_fault("btree")) {
1906                 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1907                 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1908                 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1909         }
1910
1911         if (wbio->have_ioref)
1912                 percpu_ref_put(&ca->io_ref);
1913
1914         if (parent) {
1915                 bio_put(bio);
1916                 bio_endio(&parent->bio);
1917                 return;
1918         }
1919
1920         clear_btree_node_write_in_flight_inner(b);
1921         wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1922         INIT_WORK(&wb->work, btree_node_write_work);
1923         queue_work(c->btree_io_complete_wq, &wb->work);
1924 }
1925
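/*
 * Validate a bset (and the node's own key) before it goes out to disk;
 * failure here is an inconsistency, not a recoverable read error:
 */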
1926 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1927                                    struct bset *i, unsigned sectors)
1928 {
1929         struct printbuf buf = PRINTBUF;
1930         bool saw_error;
1931         int ret;
1932
1933         ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1934                                 BKEY_TYPE_btree, WRITE, &buf);
1935
1936         if (ret)
1937                 bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1938         printbuf_exit(&buf);
1939         if (ret)
1940                 return ret;
1941
1942         ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1943                 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1944         if (ret) {
1945                 bch2_inconsistent_error(c);
1946                 dump_stack();
1947         }
1948
1949         return ret;
1950 }
1951
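/*
 * Writes after the first land partway into the node's allocation, so submit
 * against a copy of the key with each pointer advanced by sector_offset:
 */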
1952 static void btree_write_submit(struct work_struct *work)
1953 {
1954         struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1955         BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1956
1957         bkey_copy(&tmp.k, &wbio->key);
1958
1959         bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1960                 ptr->offset += wbio->sector_offset;
1961
1962         bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1963                                   &tmp.k, false);
1964 }
1965
1966 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1967 {
1968         struct btree_write_bio *wbio;
1969         struct bset_tree *t;
1970         struct bset *i;
1971         struct btree_node *bn = NULL;
1972         struct btree_node_entry *bne = NULL;
1973         struct sort_iter_stack sort_iter;
1974         struct nonce nonce;
1975         unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1976         u64 seq = 0;
1977         bool used_mempool;
1978         unsigned long old, new;
1979         bool validate_before_checksum = false;
1980         enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1981         void *data;
1982         int ret;
1983
1984         if (flags & BTREE_WRITE_ALREADY_STARTED)
1985                 goto do_write;
1986
1987         /*
1988          * We may only have a read lock on the btree node - the dirty bit is our
1989          * "lock" against racing with other threads that may be trying to start
1990          * a write, we do a write iff we clear the dirty bit. Since setting the
1991          * dirty bit requires a write lock, we can't race with other threads
1992          * redirtying it:
1993          */
1994         do {
1995                 old = new = READ_ONCE(b->flags);
1996
1997                 if (!(old & (1 << BTREE_NODE_dirty)))
1998                         return;
1999
2000                 if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
2001                     !(old & (1 << BTREE_NODE_need_write)))
2002                         return;
2003
2004                 if (old &
2005                     ((1 << BTREE_NODE_never_write)|
2006                      (1 << BTREE_NODE_write_blocked)))
2007                         return;
2008
2009                 if (b->written &&
2010                     (old & (1 << BTREE_NODE_will_make_reachable)))
2011                         return;
2012
2013                 if (old & (1 << BTREE_NODE_write_in_flight))
2014                         return;
2015
2016                 if (flags & BTREE_WRITE_ONLY_IF_NEED)
2017                         type = new & BTREE_WRITE_TYPE_MASK;
2018                 new &= ~BTREE_WRITE_TYPE_MASK;
2019
2020                 new &= ~(1 << BTREE_NODE_dirty);
2021                 new &= ~(1 << BTREE_NODE_need_write);
2022                 new |=  (1 << BTREE_NODE_write_in_flight);
2023                 new |=  (1 << BTREE_NODE_write_in_flight_inner);
2024                 new |=  (1 << BTREE_NODE_just_written);
2025                 new ^=  (1 << BTREE_NODE_write_idx);
2026         } while (cmpxchg_acquire(&b->flags, old, new) != old);
2027
2028         if (new & (1U << BTREE_NODE_need_write))
2029                 return;
2030 do_write:
2031         BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
2032
2033         atomic_dec(&c->btree_cache.dirty);
2034
2035         BUG_ON(btree_node_fake(b));
2036         BUG_ON((b->will_make_reachable != 0) != !b->written);
2037
2038         BUG_ON(b->written >= btree_sectors(c));
2039         BUG_ON(b->written & (block_sectors(c) - 1));
2040         BUG_ON(bset_written(b, btree_bset_last(b)));
2041         BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
2042         BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
2043
2044         bch2_sort_whiteouts(c, b);
2045
2046         sort_iter_stack_init(&sort_iter, b);
2047
2048         bytes = !b->written
2049                 ? sizeof(struct btree_node)
2050                 : sizeof(struct btree_node_entry);
2051
2052         bytes += b->whiteout_u64s * sizeof(u64);
2053
2054         for_each_bset(b, t) {
2055                 i = bset(b, t);
2056
2057                 if (bset_written(b, i))
2058                         continue;
2059
2060                 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
2061                 sort_iter_add(&sort_iter.iter,
2062                               btree_bkey_first(b, t),
2063                               btree_bkey_last(b, t));
2064                 seq = max(seq, le64_to_cpu(i->journal_seq));
2065         }
2066
2067         BUG_ON(b->written && !seq);
2068
2069         /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
2070         bytes += 8;
2071
2072         /* buffer must be a multiple of the block size */
2073         bytes = round_up(bytes, block_bytes(c));
2074
2075         data = btree_bounce_alloc(c, bytes, &used_mempool);
2076
2077         if (!b->written) {
2078                 bn = data;
2079                 *bn = *b->data;
2080                 i = &bn->keys;
2081         } else {
2082                 bne = data;
2083                 bne->keys = b->data->keys;
2084                 i = &bne->keys;
2085         }
2086
2087         i->journal_seq  = cpu_to_le64(seq);
2088         i->u64s         = 0;
2089
2090         sort_iter_add(&sort_iter.iter,
2091                       unwritten_whiteouts_start(b),
2092                       unwritten_whiteouts_end(b));
2093         SET_BSET_SEPARATE_WHITEOUTS(i, false);
2094
2095         b->whiteout_u64s = 0;
2096
2097         u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
2098         le16_add_cpu(&i->u64s, u64s);
2099
2100         BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2101
2102         set_needs_whiteout(i, false);
2103
2104         /* do we have data to write? */
2105         if (b->written && !i->u64s)
2106                 goto nowrite;
2107
2108         bytes_to_write = vstruct_end(i) - data;
2109         sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2110
2111         if (!b->written &&
2112             b->key.k.type == KEY_TYPE_btree_ptr_v2)
2113                 BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2114
2115         memset(data + bytes_to_write, 0,
2116                (sectors_to_write << 9) - bytes_to_write);
2117
2118         BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2119         BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2120         BUG_ON(i->seq != b->data->keys.seq);
2121
2122         i->version = cpu_to_le16(c->sb.version);
2123         SET_BSET_OFFSET(i, b->written);
2124         SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2125
2126         if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2127                 validate_before_checksum = true;
2128
2129         /* validate_bset will be modifying: */
2130         if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2131                 validate_before_checksum = true;
2132
2133         /* if we're going to be encrypting, check metadata validity first: */
2134         if (validate_before_checksum &&
2135             validate_bset_for_write(c, b, i, sectors_to_write))
2136                 goto err;
2137
2138         ret = bset_encrypt(c, i, b->written << 9);
2139         if (bch2_fs_fatal_err_on(ret, c,
2140                         "encrypting btree node: %s", bch2_err_str(ret)))
2141                 goto err;
2142
2143         nonce = btree_nonce(i, b->written << 9);
2144
2145         if (bn)
2146                 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2147         else
2148                 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2149
2150         /* if we're not encrypting, check metadata after checksumming: */
2151         if (!validate_before_checksum &&
2152             validate_bset_for_write(c, b, i, sectors_to_write))
2153                 goto err;
2154
2155         /*
2156          * We handle btree write errors by immediately halting the journal -
2157          * after we've done that, we can't issue any subsequent btree writes
2158          * because they might have pointers to new nodes that failed to write.
2159          *
2160          * Furthermore, there's no point in doing any more btree writes because
2161          * with the journal stopped, we're never going to update the journal to
2162          * reflect that those writes were done and the data flushed from the
2163          * journal:
2164          *
2165          * Also on journal error, the pending write may have updates that were
2166          * never journalled (interior nodes, see btree_update_nodes_written()) -
2167          * it's critical that we don't do the write in that case otherwise we
2168          * will have updates visible that weren't in the journal:
2169          *
2170          * Make sure to update b->written so bch2_btree_init_next() doesn't
2171          * break:
2172          */
2173         if (bch2_journal_error(&c->journal) ||
2174             c->opts.nochanges)
2175                 goto err;
2176
2177         trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2178
2179         wbio = container_of(bio_alloc_bioset(NULL,
2180                                 buf_pages(data, sectors_to_write << 9),
2181                                 REQ_OP_WRITE|REQ_META,
2182                                 GFP_NOFS,
2183                                 &c->btree_bio),
2184                             struct btree_write_bio, wbio.bio);
2185         wbio_init(&wbio->wbio.bio);
2186         wbio->data                      = data;
2187         wbio->data_bytes                = bytes;
2188         wbio->sector_offset             = b->written;
2189         wbio->wbio.c                    = c;
2190         wbio->wbio.used_mempool         = used_mempool;
2191         wbio->wbio.first_btree_write    = !b->written;
2192         wbio->wbio.bio.bi_end_io        = btree_node_write_endio;
2193         wbio->wbio.bio.bi_private       = b;
2194
2195         bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2196
2197         bkey_copy(&wbio->key, &b->key);
2198
2199         b->written += sectors_to_write;
2200
2201         if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2202                 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2203                         cpu_to_le16(b->written);
2204
2205         atomic64_inc(&c->btree_write_stats[type].nr);
2206         atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2207
2208         INIT_WORK(&wbio->work, btree_write_submit);
2209         queue_work(c->io_complete_wq, &wbio->work);
2210         return;
2211 err:
2212         set_btree_node_noevict(b);
2213         b->written += sectors_to_write;
2214 nowrite:
2215         btree_bounce_free(c, bytes, used_mempool, data);
2216         __btree_node_write_done(c, b);
2217 }
2218
2219 /*
2220  * Work that must be done with write lock held:
2221  */
2222 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2223 {
2224         bool invalidated_iter = false;
2225         struct btree_node_entry *bne;
2226         struct bset_tree *t;
2227
2228         if (!btree_node_just_written(b))
2229                 return false;
2230
2231         BUG_ON(b->whiteout_u64s);
2232
2233         clear_btree_node_just_written(b);
2234
2235         /*
2236          * Note: immediately after write, bset_written() doesn't work - the
2237          * amount of data we had to write after compaction might have been
2238          * smaller than the offset of the last bset.
2239          *
2240          * However, we know that all bsets have been written here, as long as
2241          * we're still holding the write lock:
2242          */
2243
2244         /*
2245          * XXX: decide if we really want to unconditionally sort down to a
2246          * single bset:
2247          */
2248         if (b->nsets > 1) {
2249                 btree_node_sort(c, b, 0, b->nsets, true);
2250                 invalidated_iter = true;
2251         } else {
2252                 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2253         }
2254
2255         for_each_bset(b, t)
2256                 set_needs_whiteout(bset(b, t), true);
2257
2258         bch2_btree_verify(c, b);
2259
2260         /*
2261          * If later we don't unconditionally sort down to a single bset, we have
2262          * to ensure this is still true:
2263          */
2264         BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2265
2266         bne = want_new_bset(c, b);
2267         if (bne)
2268                 bch2_bset_init_next(b, bne);
2269
2270         bch2_btree_build_aux_trees(b);
2271
2272         return invalidated_iter;
2273 }
2274
2275 /*
2276  * Use this one if the node is intent locked:
2277  */
2278 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2279                            enum six_lock_type lock_type_held,
2280                            unsigned flags)
2281 {
2282         if (lock_type_held == SIX_LOCK_intent ||
2283             (lock_type_held == SIX_LOCK_read &&
2284              six_lock_tryupgrade(&b->c.lock))) {
2285                 __bch2_btree_node_write(c, b, flags);
2286
2287                 /* don't cycle lock unnecessarily: */
2288                 if (btree_node_just_written(b) &&
2289                     six_trylock_write(&b->c.lock)) {
2290                         bch2_btree_post_write_cleanup(c, b);
2291                         six_unlock_write(&b->c.lock);
2292                 }
2293
2294                 if (lock_type_held == SIX_LOCK_read)
2295                         six_lock_downgrade(&b->c.lock);
2296         } else {
2297                 __bch2_btree_node_write(c, b, flags);
2298                 if (lock_type_held == SIX_LOCK_write &&
2299                     btree_node_just_written(b))
2300                         bch2_btree_post_write_cleanup(c, b);
2301         }
2302 }
2303
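/*
 * We can't sleep on an IO bit while holding rcu_read_lock, so drop it and
 * rescan the hash table from the start after each wait:
 */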
2304 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2305 {
2306         struct bucket_table *tbl;
2307         struct rhash_head *pos;
2308         struct btree *b;
2309         unsigned i;
2310         bool ret = false;
2311 restart:
2312         rcu_read_lock();
2313         for_each_cached_btree(b, c, tbl, i, pos)
2314                 if (test_bit(flag, &b->flags)) {
2315                         rcu_read_unlock();
2316                         wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2317                         ret = true;
2318                         goto restart;
2319                 }
2320         rcu_read_unlock();
2321
2322         return ret;
2323 }
2324
2325 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2326 {
2327         return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2328 }
2329
2330 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2331 {
2332         return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2333 }
2334
2335 static const char * const bch2_btree_write_types[] = {
2336 #define x(t, n) [n] = #t,
2337         BCH_BTREE_WRITE_TYPES()
2338         NULL
2339 };
2340
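/*
 * Example output (type names and values illustrative only):
 *
 *			nr	size
 * initial:		211	98.9k
 * journal_reclaim:	54	4.10k
 */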
2341 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2342 {
2343         printbuf_tabstop_push(out, 20);
2344         printbuf_tabstop_push(out, 10);
2345
2346         prt_tab(out);
2347         prt_str(out, "nr");
2348         prt_tab(out);
2349         prt_str(out, "size");
2350         prt_newline(out);
2351
2352         for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2353                 u64 nr          = atomic64_read(&c->btree_write_stats[i].nr);
2354                 u64 bytes       = atomic64_read(&c->btree_write_stats[i].bytes);
2355
2356                 prt_printf(out, "%s:", bch2_btree_write_types[i]);
2357                 prt_tab(out);
2358                 prt_u64(out, nr);
2359                 prt_tab(out);
2360                 prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
2361                 prt_newline(out);
2362         }
2363 }