1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
23 #include <linux/sched/mm.h>
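/*
 * verify_no_dups(): debug-only sanity check that every key in [start, end)
 * ends strictly before the next key starts - i.e. no duplicate or
 * overlapping positions.
 */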
25 static void verify_no_dups(struct btree *b,
26 struct bkey_packed *start,
27 struct bkey_packed *end)
29 #ifdef CONFIG_BCACHEFS_DEBUG
30 struct bkey_packed *k, *p;
35 for (p = start, k = bkey_next(start);
37 p = k, k = bkey_next(k)) {
38 struct bkey l = bkey_unpack_key(b, p);
39 struct bkey r = bkey_unpack_key(b, k);
41 BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
46 static void set_needs_whiteout(struct bset *i, int v)
48 struct bkey_packed *k;
50 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
51 k->needs_whiteout = v;
54 static void btree_bounce_free(struct bch_fs *c, size_t size,
55 bool used_mempool, void *p)
58 mempool_free(p, &c->btree_bounce_pool);
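/*
 * btree_bounce_alloc()/btree_bounce_free(): scratch buffers for sorting and
 * compacting. We first try a cheap nonblocking allocation and fall back to
 * the preallocated btree_bounce_pool; *used_mempool records which path was
 * taken so the buffer can be released the same way.
 */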
63 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
66 unsigned flags = memalloc_nofs_save();
69 BUG_ON(size > btree_bytes(c));
71 *used_mempool = false;
72 p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
75 p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
77 memalloc_nofs_restore(flags);
81 static void sort_bkey_ptrs(const struct btree *bt,
82 struct bkey_packed **ptrs, unsigned nr)
84 unsigned n = nr, a = nr / 2, b, c, d;
89 /* Heap sort: see lib/sort.c: */
94 swap(ptrs[0], ptrs[n]);
98 for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
99 b = bch2_bkey_cmp_packed(bt,
101 ptrs[d]) >= 0 ? c : d;
106 bch2_bkey_cmp_packed(bt,
113 swap(ptrs[b], ptrs[c]);
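/*
 * bch2_sort_whiteouts(): sort the node's unwritten whiteouts in place -
 * build an array of pointers to the whiteout keys, heapsort the pointers,
 * copy the keys into a bounce buffer in sorted order, then copy them back
 * over the originals.
 */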
118 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
120 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
121 bool used_mempool = false;
122 size_t bytes = b->whiteout_u64s * sizeof(u64);
124 if (!b->whiteout_u64s)
127 new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
129 ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
131 for (k = unwritten_whiteouts_start(c, b);
132 k != unwritten_whiteouts_end(c, b);
136 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
140 while (ptrs != ptrs_end) {
146 verify_no_dups(b, new_whiteouts,
147 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
149 memcpy_u64s(unwritten_whiteouts_start(c, b),
150 new_whiteouts, b->whiteout_u64s);
152 btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
155 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
156 bool compacting, enum compact_mode mode)
158 if (!bset_dead_u64s(b, t))
163 return should_compact_bset_lazy(b, t) ||
164 (compacting && !bset_written(b, bset(b, t)));
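/*
 * bch2_drop_whiteouts(): for each bset worth compacting, copy live keys down
 * over deleted keys/whiteouts, slide unwritten bsets down to close the gap,
 * and rebuild the aux trees afterwards. A deleted key may only be dropped if
 * it no longer needs a whiteout.
 */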
172 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
177 for_each_bset(b, t) {
178 struct bset *i = bset(b, t);
179 struct bkey_packed *k, *n, *out, *start, *end;
180 struct btree_node_entry *src = NULL, *dst = NULL;
182 if (t != b->set && !bset_written(b, i)) {
183 src = container_of(i, struct btree_node_entry, keys);
184 dst = max(write_block(b),
185 (void *) btree_bkey_last(b, t - 1));
191 if (!should_compact_bset(b, t, ret, mode)) {
193 memmove(dst, src, sizeof(*src) +
194 le16_to_cpu(src->keys.u64s) *
197 set_btree_bset(b, t, i);
202 start = btree_bkey_first(b, t);
203 end = btree_bkey_last(b, t);
206 memmove(dst, src, sizeof(*src));
208 set_btree_bset(b, t, i);
213 for (k = start; k != end; k = n) {
216 if (!bkey_deleted(k)) {
218 out = bkey_next(out);
220 BUG_ON(k->needs_whiteout);
224 i->u64s = cpu_to_le16((u64 *) out - i->_data);
225 set_btree_bset_end(b, t);
226 bch2_bset_set_no_aux_tree(b, t);
230 bch2_verify_btree_nr_keys(b);
232 bch2_btree_build_aux_trees(b);
237 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
238 enum compact_mode mode)
240 return bch2_drop_whiteouts(b, mode);
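/*
 * btree_node_sort(): merge bsets [start_idx, end_idx) into a single sorted
 * bset via a bounce buffer. When the entire node is being sorted the bounce
 * buffer is simply swapped in as the new node buffer; otherwise the sorted
 * keys are copied back into the first bset and the now-empty bsets dropped.
 */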
243 static void btree_node_sort(struct bch_fs *c, struct btree *b,
246 bool filter_whiteouts)
248 struct btree_node *out;
249 struct sort_iter sort_iter;
251 struct bset *start_bset = bset(b, &b->set[start_idx]);
252 bool used_mempool = false;
253 u64 start_time, seq = 0;
254 unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
255 bool sorting_entire_node = start_idx == 0 &&
258 sort_iter_init(&sort_iter, b);
260 for (t = b->set + start_idx;
261 t < b->set + end_idx;
263 u64s += le16_to_cpu(bset(b, t)->u64s);
264 sort_iter_add(&sort_iter,
265 btree_bkey_first(b, t),
266 btree_bkey_last(b, t));
269 bytes = sorting_entire_node
271 : __vstruct_bytes(struct btree_node, u64s);
273 out = btree_bounce_alloc(c, bytes, &used_mempool);
275 start_time = local_clock();
277 u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
279 out->keys.u64s = cpu_to_le16(u64s);
281 BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
283 if (sorting_entire_node)
284 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
287 /* Make sure we preserve bset journal_seq: */
288 for (t = b->set + start_idx; t < b->set + end_idx; t++)
289 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
290 start_bset->journal_seq = cpu_to_le64(seq);
292 if (sorting_entire_node) {
293 unsigned u64s = le16_to_cpu(out->keys.u64s);
295 BUG_ON(bytes != btree_bytes(c));
298 * Our temporary buffer is the same size as the btree node's
299 * buffer, so we can just swap buffers instead of doing a big
303 out->keys.u64s = cpu_to_le16(u64s);
305 set_btree_bset(b, b->set, &b->data->keys);
307 start_bset->u64s = out->keys.u64s;
308 memcpy_u64s(start_bset->start,
310 le16_to_cpu(out->keys.u64s));
313 for (i = start_idx + 1; i < end_idx; i++)
314 b->nr.bset_u64s[start_idx] +=
319 for (i = start_idx + 1; i < b->nsets; i++) {
320 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
321 b->set[i] = b->set[i + shift];
324 for (i = b->nsets; i < MAX_BSETS; i++)
325 b->nr.bset_u64s[i] = 0;
327 set_btree_bset_end(b, &b->set[start_idx]);
328 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
330 btree_bounce_free(c, bytes, used_mempool, out);
332 bch2_verify_btree_nr_keys(b);
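/*
 * bch2_btree_sort_into(): sort and repack all keys from @src into @dst
 * (which must have a single bset), merging extents when the source is an
 * extents node.
 */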
335 void bch2_btree_sort_into(struct bch_fs *c,
339 struct btree_nr_keys nr;
340 struct btree_node_iter src_iter;
341 u64 start_time = local_clock();
343 BUG_ON(dst->nsets != 1);
345 bch2_bset_set_no_aux_tree(dst, dst->set);
347 bch2_btree_node_iter_init_from_start(&src_iter, src);
349 if (btree_node_is_extents(src))
350 nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
355 nr = bch2_sort_repack(btree_bset_first(dst),
360 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
363 set_btree_bset_end(dst, dst->set);
365 dst->nr.live_u64s += nr.live_u64s;
366 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
367 dst->nr.packed_keys += nr.packed_keys;
368 dst->nr.unpacked_keys += nr.unpacked_keys;
370 bch2_verify_btree_nr_keys(dst);
373 #define SORT_CRIT (4096 / sizeof(u64))
376 * We're about to add another bset to the btree node, so if there are currently
377 * too many bsets, sort some of them together:
379 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
381 unsigned unwritten_idx;
384 for (unwritten_idx = 0;
385 unwritten_idx < b->nsets;
387 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
390 if (b->nsets - unwritten_idx > 1) {
391 btree_node_sort(c, b, unwritten_idx,
396 if (unwritten_idx > 1) {
397 btree_node_sort(c, b, 0, unwritten_idx, false);
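/*
 * bch2_btree_build_aux_trees(): rebuild the auxiliary search trees - only
 * the last bset, and only while it's still unwritten, gets the read-write
 * lookup tree; all other bsets get the read-only variant.
 */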
404 void bch2_btree_build_aux_trees(struct btree *b)
409 bch2_bset_build_aux_tree(b, t,
410 !bset_written(b, bset(b, t)) &&
411 t == bset_tree_last(b));
415 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
418 * Safe to call if there already is an unwritten bset - will only add a new bset
419 * if @b doesn't already have one.
421 * If a sort was needed (invalidating iterators), the passed-in iterator is reinitialized.
423 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
424 struct btree_iter *iter)
426 struct btree_node_entry *bne;
427 bool reinit_iter = false;
429 EBUG_ON(!(b->c.lock.state.seq & 1));
430 EBUG_ON(iter && iter->l[b->c.level].b != b);
431 BUG_ON(bset_written(b, bset(b, &b->set[1])));
433 if (b->nsets == MAX_BSETS) {
434 unsigned log_u64s[] = {
435 ilog2(bset_u64s(&b->set[0])),
436 ilog2(bset_u64s(&b->set[1])),
437 ilog2(bset_u64s(&b->set[2])),
440 if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
441 bch2_btree_node_write(c, b, SIX_LOCK_write);
446 if (b->nsets == MAX_BSETS &&
447 btree_node_compact(c, b))
450 BUG_ON(b->nsets >= MAX_BSETS);
452 bne = want_new_bset(c, b);
454 bch2_bset_init_next(c, b, bne);
456 bch2_btree_build_aux_trees(b);
458 if (iter && reinit_iter)
459 bch2_btree_iter_reinit_node(iter, b);
462 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
465 pr_buf(out, "%s level %u/%u\n ",
466 bch2_btree_ids[b->c.btree_id],
468 c->btree_roots[b->c.btree_id].level);
469 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
472 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
474 struct btree *b, struct bset *i,
475 unsigned offset, int write)
477 pr_buf(out, "error validating btree node ");
479 pr_buf(out, "before write ");
481 pr_buf(out, "on %s ", ca->name);
482 pr_buf(out, "at btree ");
483 btree_pos_to_text(out, c, b);
485 pr_buf(out, "\n node offset %u", b->written);
487 pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
490 enum btree_err_type {
492 BTREE_ERR_WANT_RETRY,
493 BTREE_ERR_MUST_RETRY,
497 enum btree_validate_ret {
498 BTREE_RETRY_READ = 64,
501 #define btree_err(type, c, ca, b, i, msg, ...) \
505 char *_buf2 = _buf; \
506 struct printbuf out = PBUF(_buf); \
508 _buf2 = kmalloc(4096, GFP_ATOMIC); \
510 out = _PBUF(_buf2, 4096); \
512 btree_err_msg(&out, c, ca, b, i, b->written, write); \
513 pr_buf(&out, ": " msg, ##__VA_ARGS__); \
515 if (type == BTREE_ERR_FIXABLE && \
517 !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
518 mustfix_fsck_err(c, "%s", _buf2); \
524 bch_err(c, "%s", _buf2); \
527 case BTREE_ERR_FIXABLE: \
528 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
530 case BTREE_ERR_WANT_RETRY: \
532 ret = BTREE_RETRY_READ; \
536 case BTREE_ERR_MUST_RETRY: \
537 ret = BTREE_RETRY_READ; \
539 case BTREE_ERR_FATAL: \
540 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
545 bch_err(c, "corrupt metadata before write: %s", _buf2); \
547 if (bch2_fs_inconsistent(c)) { \
548 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
559 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
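/*
 * validate_bset(): check a bset's version, flags and size, and - for the
 * first bset of a node - the node header (sequence number, btree id, level,
 * min/max keys, bkey format). Problems are reported through btree_err(),
 * which decides whether they're fixable, retryable or fatal.
 */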
561 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
562 struct btree *b, struct bset *i,
563 unsigned sectors, int write, bool have_retry)
565 unsigned version = le16_to_cpu(i->version);
571 btree_err_on((version != BCH_BSET_VERSION_OLD &&
572 version < bcachefs_metadata_version_min) ||
573 version >= bcachefs_metadata_version_max,
574 BTREE_ERR_FATAL, c, ca, b, i,
575 "unsupported bset version");
577 if (btree_err_on(version < c->sb.version_min,
578 BTREE_ERR_FIXABLE, c, NULL, b, i,
579 "bset version %u older than superblock version_min %u",
580 version, c->sb.version_min)) {
581 mutex_lock(&c->sb_lock);
582 c->disk_sb.sb->version_min = cpu_to_le16(version);
584 mutex_unlock(&c->sb_lock);
587 if (btree_err_on(version > c->sb.version,
588 BTREE_ERR_FIXABLE, c, NULL, b, i,
589 "bset version %u newer than superblock version %u",
590 version, c->sb.version)) {
591 mutex_lock(&c->sb_lock);
592 c->disk_sb.sb->version = cpu_to_le16(version);
594 mutex_unlock(&c->sb_lock);
597 btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
598 BTREE_ERR_FATAL, c, ca, b, i,
599 "BSET_SEPARATE_WHITEOUTS no longer supported");
601 if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
602 BTREE_ERR_FIXABLE, c, ca, b, i,
603 "bset past end of btree node")) {
608 btree_err_on(b->written && !i->u64s,
609 BTREE_ERR_FIXABLE, c, ca, b, i,
613 struct btree_node *bn =
614 container_of(i, struct btree_node, keys);
615 /* These indicate that we read the wrong btree node: */
617 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
618 struct bch_btree_ptr_v2 *bp =
619 &bkey_i_to_btree_ptr_v2(&b->key)->v;
622 btree_err_on(bp->seq != bn->keys.seq,
623 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
624 "incorrect sequence number (wrong btree node)");
627 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
628 BTREE_ERR_MUST_RETRY, c, ca, b, i,
629 "incorrect btree id");
631 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
632 BTREE_ERR_MUST_RETRY, c, ca, b, i,
636 compat_btree_node(b->c.level, b->c.btree_id, version,
637 BSET_BIG_ENDIAN(i), write, bn);
639 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
640 struct bch_btree_ptr_v2 *bp =
641 &bkey_i_to_btree_ptr_v2(&b->key)->v;
643 if (BTREE_PTR_RANGE_UPDATED(bp)) {
644 b->data->min_key = bp->min_key;
645 b->data->max_key = b->key.k.p;
648 btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
649 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
650 "incorrect min_key: got %s should be %s",
651 (bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
652 (bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
655 btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
656 BTREE_ERR_MUST_RETRY, c, ca, b, i,
657 "incorrect max key %s",
658 (bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));
661 compat_btree_node(b->c.level, b->c.btree_id, version,
662 BSET_BIG_ENDIAN(i), write, bn);
664 err = bch2_bkey_format_validate(&bn->format);
666 BTREE_ERR_FATAL, c, ca, b, i,
667 "invalid bkey format: %s", err);
669 compat_bformat(b->c.level, b->c.btree_id, version,
670 BSET_BIG_ENDIAN(i), write,
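/*
 * validate_bset_keys(): each key must fit within the bset, have a known
 * packed format, pass the bkey invariants and be in sorted order; keys that
 * fail are dropped by shifting the remainder of the bset down.
 */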
677 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
678 struct bset *i, unsigned *whiteout_u64s,
679 int write, bool have_retry)
681 unsigned version = le16_to_cpu(i->version);
682 struct bkey_packed *k, *prev = NULL;
686 k != vstruct_last(i);) {
691 if (btree_err_on(bkey_next(k) > vstruct_last(i),
692 BTREE_ERR_FIXABLE, c, NULL, b, i,
693 "key extends past end of bset")) {
694 i->u64s = cpu_to_le16((u64 *) k - i->_data);
698 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
699 BTREE_ERR_FIXABLE, c, NULL, b, i,
700 "invalid bkey format %u", k->format)) {
701 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
702 memmove_u64s_down(k, bkey_next(k),
703 (u64 *) vstruct_end(i) - (u64 *) k);
707 /* XXX: validate k->u64s */
709 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
710 BSET_BIG_ENDIAN(i), write,
713 u = __bkey_disassemble(b, k, &tmp);
715 invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
716 bch2_bkey_in_btree_node(b, u.s_c) ?:
717 (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
721 bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
722 btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
723 "invalid bkey: %s\n%s", invalid, buf);
725 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
726 memmove_u64s_down(k, bkey_next(k),
727 (u64 *) vstruct_end(i) - (u64 *) k);
732 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
733 BSET_BIG_ENDIAN(i), write,
736 if (prev && bkey_iter_cmp(b, prev, k) > 0) {
739 struct bkey up = bkey_unpack_key(b, prev);
741 bch2_bkey_to_text(&PBUF(buf1), &up);
742 bch2_bkey_to_text(&PBUF(buf2), u.k);
744 bch2_dump_bset(c, b, i, 0);
746 if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
747 "keys out of order: %s > %s",
749 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
750 memmove_u64s_down(k, bkey_next(k),
751 (u64 *) vstruct_end(i) - (u64 *) k);
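/*
 * bch2_btree_node_read_done(): parse and validate a freshly read btree node:
 * verify the magic and sequence numbers, then checksum, decrypt and validate
 * each bset, merge all bsets with bch2_key_sort_fix_overlapping() into a
 * bounce buffer that becomes the node's new buffer, and finally re-validate
 * key values, dropping any bad keys.
 */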
763 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
764 struct btree *b, bool have_retry)
766 struct btree_node_entry *bne;
767 struct sort_iter *iter;
768 struct btree_node *sorted;
769 struct bkey_packed *k;
770 struct bch_extent_ptr *ptr;
772 bool used_mempool, blacklisted;
774 int ret, retry_read = 0, write = READ;
776 b->version_ondisk = U16_MAX;
778 iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
779 sort_iter_init(iter, b);
780 iter->size = (btree_blocks(c) + 1) * 2;
782 if (bch2_meta_read_fault("btree"))
783 btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
786 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
787 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
790 btree_err_on(!b->data->keys.seq,
791 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
794 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
795 struct bch_btree_ptr_v2 *bp =
796 &bkey_i_to_btree_ptr_v2(&b->key)->v;
798 btree_err_on(b->data->keys.seq != bp->seq,
799 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
800 "got wrong btree node (seq %llx want %llx)",
801 b->data->keys.seq, bp->seq);
804 while (b->written < c->opts.btree_node_size) {
805 unsigned sectors, whiteout_u64s = 0;
807 struct bch_csum csum;
808 bool first = !b->written;
813 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
814 BTREE_ERR_WANT_RETRY, c, ca, b, i,
815 "unknown checksum type %llu",
818 nonce = btree_nonce(i, b->written << 9);
819 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
821 btree_err_on(bch2_crc_cmp(csum, b->data->csum),
822 BTREE_ERR_WANT_RETRY, c, ca, b, i,
825 bset_encrypt(c, i, b->written << 9);
827 btree_err_on(btree_node_is_extents(b) &&
828 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
829 BTREE_ERR_FATAL, c, NULL, b, NULL,
830 "btree node does not have NEW_EXTENT_OVERWRITE set");
832 sectors = vstruct_sectors(b->data, c->block_bits);
834 bne = write_block(b);
837 if (i->seq != b->data->keys.seq)
840 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
841 BTREE_ERR_WANT_RETRY, c, ca, b, i,
842 "unknown checksum type %llu",
845 nonce = btree_nonce(i, b->written << 9);
846 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
848 btree_err_on(bch2_crc_cmp(csum, bne->csum),
849 BTREE_ERR_WANT_RETRY, c, ca, b, i,
852 bset_encrypt(c, i, b->written << 9);
854 sectors = vstruct_sectors(bne, c->block_bits);
857 b->version_ondisk = min(b->version_ondisk,
858 le16_to_cpu(i->version));
860 ret = validate_bset(c, ca, b, i, sectors,
866 btree_node_set_format(b, b->data->format);
868 ret = validate_bset_keys(c, b, i, &whiteout_u64s,
873 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
875 b->written += sectors;
877 blacklisted = bch2_journal_seq_is_blacklisted(c,
878 le64_to_cpu(i->journal_seq),
881 btree_err_on(blacklisted && first,
882 BTREE_ERR_FIXABLE, c, ca, b, i,
883 "first btree node bset has blacklisted journal seq");
884 if (blacklisted && !first)
887 sort_iter_add(iter, i->start,
888 vstruct_idx(i, whiteout_u64s));
891 vstruct_idx(i, whiteout_u64s),
895 for (bne = write_block(b);
896 bset_byte_offset(b, bne) < btree_bytes(c);
897 bne = (void *) bne + block_bytes(c))
898 btree_err_on(bne->keys.seq == b->data->keys.seq,
899 BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
900 "found bset signature after last bset");
902 sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
903 sorted->keys.u64s = 0;
905 set_btree_bset(b, b->set, &b->data->keys);
907 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
909 u64s = le16_to_cpu(sorted->keys.u64s);
911 sorted->keys.u64s = cpu_to_le16(u64s);
912 swap(sorted, b->data);
913 set_btree_bset(b, b->set, &b->data->keys);
916 BUG_ON(b->nr.live_u64s != u64s);
918 btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
921 for (k = i->start; k != vstruct_last(i);) {
923 struct bkey_s u = __bkey_disassemble(b, k, &tmp);
924 const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
927 (bch2_inject_invalid_keys &&
928 !bversion_cmp(u.k->version, MAX_VERSION))) {
931 bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
932 btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
933 "invalid bkey %s: %s", buf, invalid);
935 btree_keys_account_key_drop(&b->nr, 0, k);
937 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
938 memmove_u64s_down(k, bkey_next(k),
939 (u64 *) vstruct_end(i) - (u64 *) k);
940 set_btree_bset_end(b, b->set);
944 if (u.k->type == KEY_TYPE_btree_ptr_v2) {
945 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
953 bch2_bset_build_aux_tree(b, b->set, false);
955 set_needs_whiteout(btree_bset_first(b), true);
957 btree_node_reset_sib_u64s(b);
959 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
960 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
962 if (ca->mi.state != BCH_MEMBER_STATE_rw)
963 set_btree_node_need_rewrite(b);
966 mempool_free(iter, &c->fill_iter);
969 if (ret == BTREE_RETRY_READ) {
972 bch2_inconsistent_error(c);
973 set_btree_node_read_error(b);
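/*
 * btree_node_read_work(): read completion/retry loop - if the read failed
 * or didn't validate, mark the device in the failure list, pick another
 * replica with bch2_bkey_pick_read_device() and retry until the read
 * succeeds or we run out of devices.
 */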
978 static void btree_node_read_work(struct work_struct *work)
980 struct btree_read_bio *rb =
981 container_of(work, struct btree_read_bio, work);
982 struct bch_fs *c = rb->c;
983 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
984 struct btree *b = rb->bio.bi_private;
985 struct bio *bio = &rb->bio;
986 struct bch_io_failures failed = { .nr = 0 };
993 bch_info(c, "retrying read");
994 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
995 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
996 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
997 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
998 bio->bi_iter.bi_size = btree_bytes(c);
1000 if (rb->have_ioref) {
1001 bio_set_dev(bio, ca->disk_sb.bdev);
1002 submit_bio_wait(bio);
1004 bio->bi_status = BLK_STS_REMOVED;
1008 btree_pos_to_text(&out, c, b);
1009 bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
1010 bch2_blk_status_to_str(bio->bi_status), buf);
1012 percpu_ref_put(&ca->io_ref);
1013 rb->have_ioref = false;
1015 bch2_mark_io_failure(&failed, &rb->pick);
1017 can_retry = bch2_bkey_pick_read_device(c,
1018 bkey_i_to_s_c(&b->key),
1019 &failed, &rb->pick) > 0;
1021 if (!bio->bi_status &&
1022 !bch2_btree_node_read_done(c, ca, b, can_retry))
1026 set_btree_node_read_error(b);
1031 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1034 clear_btree_node_read_in_flight(b);
1035 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1038 static void btree_node_read_endio(struct bio *bio)
1040 struct btree_read_bio *rb =
1041 container_of(bio, struct btree_read_bio, bio);
1042 struct bch_fs *c = rb->c;
1044 if (rb->have_ioref) {
1045 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1046 bch2_latency_acct(ca, rb->start_time, READ);
1049 queue_work(system_unbound_wq, &rb->work);
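/*
 * bch2_btree_node_read(): pick a replica, set up the read bio and either
 * submit it asynchronously (completion runs btree_node_read_work()) or, for
 * synchronous reads, wait for the bio and run the completion work inline.
 */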
1052 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1055 struct extent_ptr_decoded pick;
1056 struct btree_read_bio *rb;
1062 btree_pos_to_text(&PBUF(buf), c, b);
1063 trace_btree_read(c, b);
1065 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1067 if (bch2_fs_fatal_err_on(ret <= 0, c,
1068 "btree node read error: no device to read from\n"
1070 set_btree_node_read_error(b);
1074 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1076 bio = bio_alloc_bioset(NULL,
1077 buf_pages(b->data, btree_bytes(c)),
1078 REQ_OP_READ|REQ_SYNC|REQ_META,
1081 rb = container_of(bio, struct btree_read_bio, bio);
1083 rb->start_time = local_clock();
1084 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1086 INIT_WORK(&rb->work, btree_node_read_work);
1087 bio->bi_iter.bi_sector = pick.ptr.offset;
1088 bio->bi_end_io = btree_node_read_endio;
1089 bio->bi_private = b;
1090 bch2_bio_map(bio, b->data, btree_bytes(c));
1092 set_btree_node_read_in_flight(b);
1094 if (rb->have_ioref) {
1095 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1097 bio_set_dev(bio, ca->disk_sb.bdev);
1100 submit_bio_wait(bio);
1102 bio->bi_private = b;
1103 btree_node_read_work(&rb->work);
1108 bio->bi_status = BLK_STS_REMOVED;
1111 btree_node_read_work(&rb->work);
1113 queue_work(system_unbound_wq, &rb->work);
1118 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1119 const struct bkey_i *k, unsigned level)
1125 closure_init_stack(&cl);
1128 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1132 b = bch2_btree_node_mem_alloc(c);
1133 bch2_btree_cache_cannibalize_unlock(c);
1137 bkey_copy(&b->key, k);
1138 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1140 bch2_btree_node_read(c, b, true);
1142 if (btree_node_read_error(b)) {
1143 bch2_btree_node_hash_remove(&c->btree_cache, b);
1145 mutex_lock(&c->btree_cache.lock);
1146 list_move(&b->list, &c->btree_cache.freeable);
1147 mutex_unlock(&c->btree_cache.lock);
1153 bch2_btree_set_root_for_read(c, b);
1155 six_unlock_write(&b->c.lock);
1156 six_unlock_intent(&b->c.lock);
1161 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1162 struct btree_write *w)
1164 unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1172 } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1175 closure_put(&((struct btree_update *) new)->cl);
1177 bch2_journal_pin_drop(&c->journal, &w->journal);
1180 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1182 struct btree_write *w = btree_prev_write(b);
1184 bch2_btree_complete_write(c, b, w);
1185 btree_node_io_unlock(b);
1188 static void bch2_btree_node_write_error(struct bch_fs *c,
1189 struct btree_write_bio *wbio)
1191 struct btree *b = wbio->wbio.bio.bi_private;
1193 struct bch_extent_ptr *ptr;
1194 struct btree_trans trans;
1195 struct btree_iter *iter;
1198 bch2_bkey_buf_init(&k);
1199 bch2_trans_init(&trans, c, 0, 0);
1201 iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
1202 BTREE_MAX_DEPTH, b->c.level, 0);
1204 ret = bch2_btree_iter_traverse(iter);
1208 /* has node been freed? */
1209 if (iter->l[b->c.level].b != b) {
1210 /* node has been freed: */
1211 BUG_ON(!btree_node_dying(b));
1215 BUG_ON(!btree_node_hashed(b));
1217 bch2_bkey_buf_copy(&k, c, &b->key);
1219 bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
1220 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1222 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
1225 ret = bch2_btree_node_update_key(c, iter, b, k.k);
1231 bch2_trans_iter_put(&trans, iter);
1232 bch2_trans_exit(&trans);
1233 bch2_bkey_buf_exit(&k, c);
1234 bio_put(&wbio->wbio.bio);
1235 btree_node_write_done(c, b);
1238 set_btree_node_noevict(b);
1239 bch2_fs_fatal_error(c, "fatal error writing btree node");
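/*
 * bch2_btree_write_error_work(): drain the btree_write_error_list and, for
 * each failed write, drop the pointers to the devices that errored by
 * updating the node's key; if no pointers remain the error is fatal.
 */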
1243 void bch2_btree_write_error_work(struct work_struct *work)
1245 struct bch_fs *c = container_of(work, struct bch_fs,
1246 btree_write_error_work);
1250 spin_lock_irq(&c->btree_write_error_lock);
1251 bio = bio_list_pop(&c->btree_write_error_list);
1252 spin_unlock_irq(&c->btree_write_error_lock);
1257 bch2_btree_node_write_error(c,
1258 container_of(bio, struct btree_write_bio, wbio.bio));
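/*
 * btree_node_write_work(): write completion in workqueue context - free the
 * bounce buffer and, if any device failed, punt the bio to
 * btree_write_error_work; otherwise finish the write via
 * btree_node_write_done(), which also drops the journal pin.
 */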
1262 static void btree_node_write_work(struct work_struct *work)
1264 struct btree_write_bio *wbio =
1265 container_of(work, struct btree_write_bio, work);
1266 struct bch_fs *c = wbio->wbio.c;
1267 struct btree *b = wbio->wbio.bio.bi_private;
1269 btree_bounce_free(c,
1271 wbio->wbio.used_mempool,
1274 if (wbio->wbio.failed.nr) {
1275 unsigned long flags;
1277 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1278 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1279 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1281 queue_work(c->wq, &c->btree_write_error_work);
1285 bio_put(&wbio->wbio.bio);
1286 btree_node_write_done(c, b);
1289 static void btree_node_write_endio(struct bio *bio)
1291 struct bch_write_bio *wbio = to_wbio(bio);
1292 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1293 struct bch_write_bio *orig = parent ?: wbio;
1294 struct bch_fs *c = wbio->c;
1295 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
1296 unsigned long flags;
1298 if (wbio->have_ioref)
1299 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1301 if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
1302 bch2_blk_status_to_str(bio->bi_status)) ||
1303 bch2_meta_write_fault("btree")) {
1304 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1305 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1306 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1309 if (wbio->have_ioref)
1310 percpu_ref_put(&ca->io_ref);
1314 bio_endio(&parent->bio);
1316 struct btree_write_bio *wb =
1317 container_of(orig, struct btree_write_bio, wbio);
1319 INIT_WORK(&wb->work, btree_node_write_work);
1320 queue_work(system_unbound_wq, &wb->work);
1324 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1325 struct bset *i, unsigned sectors)
1327 unsigned whiteout_u64s = 0;
1330 if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
1333 ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
1334 validate_bset(c, NULL, b, i, sectors, WRITE, false);
1336 bch2_inconsistent_error(c);
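/*
 * __bch2_btree_node_write(): the btree node write path - atomically claim
 * the write by clearing the dirty bit (and setting write_in_flight), sort
 * the unwritten bsets and whiteouts into a bounce buffer, checksum/encrypt
 * and validate the result, then submit it to all replicas with the pointers
 * offset by the sectors already written.
 */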
1343 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
1345 struct btree_write_bio *wbio;
1346 struct bset_tree *t;
1348 struct btree_node *bn = NULL;
1349 struct btree_node_entry *bne = NULL;
1351 struct bch_extent_ptr *ptr;
1352 struct sort_iter sort_iter;
1354 unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1357 unsigned long old, new;
1358 bool validate_before_checksum = false;
1361 bch2_bkey_buf_init(&k);
1363 if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1367 * We may only have a read lock on the btree node - the dirty bit is our
1368 * "lock" against racing with other threads that may be trying to start
1369 * a write; we do a write iff we clear the dirty bit. Since setting the
1370 * dirty bit requires a write lock, we can't race with other threads
1374 old = new = READ_ONCE(b->flags);
1376 if (!(old & (1 << BTREE_NODE_dirty)))
1379 if (!btree_node_may_write(b))
1382 if (old & (1 << BTREE_NODE_never_write))
1385 if (old & (1 << BTREE_NODE_write_in_flight)) {
1386 btree_node_wait_on_io(b);
1390 new &= ~(1 << BTREE_NODE_dirty);
1391 new &= ~(1 << BTREE_NODE_need_write);
1392 new |= (1 << BTREE_NODE_write_in_flight);
1393 new |= (1 << BTREE_NODE_just_written);
1394 new ^= (1 << BTREE_NODE_write_idx);
1395 } while (cmpxchg_acquire(&b->flags, old, new) != old);
1397 atomic_dec(&c->btree_cache.dirty);
1399 BUG_ON(btree_node_fake(b));
1400 BUG_ON((b->will_make_reachable != 0) != !b->written);
1402 BUG_ON(b->written >= c->opts.btree_node_size);
1403 BUG_ON(b->written & (c->opts.block_size - 1));
1404 BUG_ON(bset_written(b, btree_bset_last(b)));
1405 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1406 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1408 bch2_sort_whiteouts(c, b);
1410 sort_iter_init(&sort_iter, b);
1413 ? sizeof(struct btree_node)
1414 : sizeof(struct btree_node_entry);
1416 bytes += b->whiteout_u64s * sizeof(u64);
1418 for_each_bset(b, t) {
1421 if (bset_written(b, i))
1424 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1425 sort_iter_add(&sort_iter,
1426 btree_bkey_first(b, t),
1427 btree_bkey_last(b, t));
1428 seq = max(seq, le64_to_cpu(i->journal_seq));
1431 BUG_ON(b->written && !seq);
1433 /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
1436 data = btree_bounce_alloc(c, bytes, &used_mempool);
1444 bne->keys = b->data->keys;
1448 i->journal_seq = cpu_to_le64(seq);
1451 sort_iter_add(&sort_iter,
1452 unwritten_whiteouts_start(c, b),
1453 unwritten_whiteouts_end(c, b));
1454 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1456 b->whiteout_u64s = 0;
1458 u64s = bch2_sort_keys(i->start, &sort_iter, false);
1459 le16_add_cpu(&i->u64s, u64s);
1461 set_needs_whiteout(i, false);
1463 /* do we have data to write? */
1464 if (b->written && !i->u64s)
1467 bytes_to_write = vstruct_end(i) - data;
1468 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1470 memset(data + bytes_to_write, 0,
1471 (sectors_to_write << 9) - bytes_to_write);
1473 BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1474 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1475 BUG_ON(i->seq != b->data->keys.seq);
1477 i->version = c->sb.version < bcachefs_metadata_version_new_versioning
1478 ? cpu_to_le16(BCH_BSET_VERSION_OLD)
1479 : cpu_to_le16(c->sb.version);
1480 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1482 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1483 validate_before_checksum = true;
1485 /* validate_bset will be modifying: */
1486 if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
1487 validate_before_checksum = true;
1489 /* if we're going to be encrypting, check metadata validity first: */
1490 if (validate_before_checksum &&
1491 validate_bset_for_write(c, b, i, sectors_to_write))
1494 bset_encrypt(c, i, b->written << 9);
1496 nonce = btree_nonce(i, b->written << 9);
1499 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1501 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1503 /* if we're not encrypting, check metadata after checksumming: */
1504 if (!validate_before_checksum &&
1505 validate_bset_for_write(c, b, i, sectors_to_write))
1509 * We handle btree write errors by immediately halting the journal -
1510 * after we've done that, we can't issue any subsequent btree writes
1511 * because they might have pointers to new nodes that failed to write.
1513 * Furthermore, there's no point in doing any more btree writes because
1514 * with the journal stopped, we're never going to update the journal to
1515 * reflect that those writes were done and the data flushed from the
1518 * Also on journal error, the pending write may have updates that were
1519 * never journalled (interior nodes, see btree_update_nodes_written()) -
1520 * it's critical that we don't do the write in that case otherwise we
1521 * will have updates visible that weren't in the journal:
1523 * Make sure to update b->written so bch2_btree_init_next() doesn't
1526 if (bch2_journal_error(&c->journal) ||
1530 trace_btree_write(b, bytes_to_write, sectors_to_write);
1532 wbio = container_of(bio_alloc_bioset(NULL,
1533 buf_pages(data, sectors_to_write << 9),
1534 REQ_OP_WRITE|REQ_META,
1537 struct btree_write_bio, wbio.bio);
1538 wbio_init(&wbio->wbio.bio);
1540 wbio->bytes = bytes;
1541 wbio->wbio.used_mempool = used_mempool;
1542 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
1543 wbio->wbio.bio.bi_private = b;
1545 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
1548 * If we're appending to a leaf node, we don't technically need FUA -
1549 * this write just needs to be persisted before the next journal write,
1550 * which will be marked FLUSH|FUA.
1552 * Similarly if we're writing a new btree root - the pointer is going to
1553 * be in the next journal entry.
1555 * But if we're writing a new btree node (that isn't a root) or
1556 * appending to a non leaf btree node, we need either FUA or a flush
1557 * when we write the parent with the new pointer. FUA is cheaper than a
1558 * flush, and writes appending to leaf nodes aren't blocking anything so
1559 * just make all btree node writes FUA to keep things sane.
1562 bch2_bkey_buf_copy(&k, c, &b->key);
1564 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr)
1565 ptr->offset += b->written;
1567 b->written += sectors_to_write;
1569 atomic64_inc(&c->btree_writes_nr);
1570 atomic64_add(sectors_to_write, &c->btree_writes_sectors);
1572 /* XXX: submitting IO with btree locks held: */
1573 bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
1574 bch2_bkey_buf_exit(&k, c);
1577 set_btree_node_noevict(b);
1578 b->written += sectors_to_write;
1580 btree_bounce_free(c, bytes, used_mempool, data);
1581 btree_node_write_done(c, b);
1585 * Work that must be done with write lock held:
1587 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1589 bool invalidated_iter = false;
1590 struct btree_node_entry *bne;
1591 struct bset_tree *t;
1593 if (!btree_node_just_written(b))
1596 BUG_ON(b->whiteout_u64s);
1598 clear_btree_node_just_written(b);
1601 * Note: immediately after write, bset_written() doesn't work - the
1602 * amount of data we had to write after compaction might have been
1603 * smaller than the offset of the last bset.
1605 * However, we know that all bsets have been written here, as long as
1606 * we're still holding the write lock:
1610 * XXX: decide if we really want to unconditionally sort down to a
1614 btree_node_sort(c, b, 0, b->nsets, true);
1615 invalidated_iter = true;
1617 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
1621 set_needs_whiteout(bset(b, t), true);
1623 bch2_btree_verify(c, b);
1626 * If later we don't unconditionally sort down to a single bset, we have
1627 * to ensure this is still true:
1629 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1631 bne = want_new_bset(c, b);
1633 bch2_bset_init_next(c, b, bne);
1635 bch2_btree_build_aux_trees(b);
1637 return invalidated_iter;
1641 * Use this one if the node is intent locked:
1643 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1644 enum six_lock_type lock_type_held)
1646 if (lock_type_held == SIX_LOCK_intent ||
1647 (lock_type_held == SIX_LOCK_read &&
1648 six_lock_tryupgrade(&b->c.lock))) {
1649 __bch2_btree_node_write(c, b);
1651 /* don't cycle lock unnecessarily: */
1652 if (btree_node_just_written(b) &&
1653 six_trylock_write(&b->c.lock)) {
1654 bch2_btree_post_write_cleanup(c, b);
1655 six_unlock_write(&b->c.lock);
1658 if (lock_type_held == SIX_LOCK_read)
1659 six_lock_downgrade(&b->c.lock);
1661 __bch2_btree_node_write(c, b);
1662 if (lock_type_held == SIX_LOCK_write &&
1663 btree_node_just_written(b))
1664 bch2_btree_post_write_cleanup(c, b);
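/*
 * __bch2_btree_flush_all(): wait for all in-flight reads or writes
 * (depending on @flag) by walking the btree node hash table and sleeping on
 * each node's flag bit.
 */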
1668 static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
1670 struct bucket_table *tbl;
1671 struct rhash_head *pos;
1676 for_each_cached_btree(b, c, tbl, i, pos)
1677 if (test_bit(flag, &b->flags)) {
1679 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
1686 void bch2_btree_flush_all_reads(struct bch_fs *c)
1688 __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
1691 void bch2_btree_flush_all_writes(struct bch_fs *c)
1693 __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
1696 void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
1698 struct bucket_table *tbl;
1699 struct rhash_head *pos;
1704 for_each_cached_btree(b, c, tbl, i, pos) {
1705 unsigned long flags = READ_ONCE(b->flags);
1707 if (!(flags & (1 << BTREE_NODE_dirty)))
1710 pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
1712 (flags & (1 << BTREE_NODE_dirty)) != 0,
1713 (flags & (1 << BTREE_NODE_need_write)) != 0,
1716 !list_empty_careful(&b->write_blocked),
1717 b->will_make_reachable != 0,
1718 b->will_make_reachable & 1);