// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
				       btree_path_idx_t, btree_path_idx_t);
static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);
static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id r_btree_id,
				   bool r_cached,
				   struct bpos r_pos,
				   unsigned r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}
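
/*
 * Illustrative sketch (not part of the original file): the comparators above
 * return <0/0/>0 like memcmp(), so a transaction's paths can be kept sorted
 * with any comparison-based sort. Note the negated level comparison: at equal
 * (btree_id, cached, pos), the higher level sorts first, matching top-down
 * lock ordering.
 */
static inline bool btree_path_sorts_before(const struct btree_path *l,
					   const struct btree_path *r)
{
	return btree_path_cmp(l, r) < 0;
}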
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_is_extents) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}
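
/*
 * Worked example (illustrative, not in the original source): extents are
 * indexed by their *end* position, so the extent covering iter->pos is the
 * first key that ends strictly after it. An extent [0,8) is stored at offset
 * 8; a lookup at offset 4 therefore searches from bkey_successor() of 4, i.e.
 * offset 5, and the peek lands on the key at 8.
 */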
static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}
static void bch2_btree_path_verify_level(struct btree_trans *trans,
					 struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}
static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;

	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}
void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
	       (iter->flags & BTREE_ITER_all_snapshots));

	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
	       (iter->flags & BTREE_ITER_all_snapshots) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_filter_snapshots))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_all_snapshots);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}
void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos)
{
	bch2_trans_verify_not_unlocked(trans);

	struct btree_path *path;
	struct trans_for_each_path_inorder_iter iter;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (path->btree_id != id ||
		    !btree_node_locked(path, 0) ||
		    !path->should_be_locked)
			continue;

		if (!path->cached) {
			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
			    bkey_le(pos, path->l[0].b->key.k.p))
				return;
		} else {
			if (bkey_eq(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}
#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}
static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}
void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;
	unsigned i;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked, i) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}
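
/*
 * Example call site (illustrative sketch, assuming the bset insert path in
 * bset.c): after a key is inserted into or resized within a bset, every
 * iterator pointing into that node must be repositioned, roughly:
 *
 *	bch2_bset_insert(b, node_iter, where, insert, clobber_u64s);
 *	bch2_btree_node_iter_fix(trans, path, b, node_iter, where,
 *				 clobber_u64s, insert->k.u64s);
 */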
/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}
static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}
static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
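
/*
 * Usage note (illustrative): __bch2_btree_path_set_pos() below calls this
 * with max_advance = 8 - stepping the node iterator forward beats redoing the
 * full binary search when the new position is only a few keys ahead:
 *
 *	if (cmp < 0 ||
 *	    !btree_path_advance_to_pos(path, l, 8))
 *		bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
 */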
static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
{
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));

	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}
/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i)
		if (!i->cached &&
		    i->level	== b->c.level &&
		    i->btree_id	== b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				struct bkey_i *j_k =
					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
								    i->k->k.p);

				if (j_k) {
					i->old_k = j_k->k;
					i->old_v = &j_k->v;
				}
			}
		}
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
			 struct btree_path *path,
			 struct btree *b)
{
	struct btree_path *prev;

	BUG_ON(!btree_path_pos_in_node(path, b));

	while ((prev = prev_btree_path(trans, path)) &&
	       btree_path_pos_in_node(prev, b))
		path = prev;

	for (;
	     path && btree_path_pos_in_node(path, b);
	     path = next_btree_path(trans, path))
		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			bch2_btree_path_level_init(trans, path, b);
		}

	bch2_trans_revalidate_updates_in_node(trans, b);
}
/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);
}
/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level,
					       (enum btree_node_locked_type) lock_type);
			bch2_btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}
static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if ((flags & BTREE_ITER_prefetch) &&
	    c->opts.btree_node_prefetch)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}
static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
		if (!k) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "node not found at pos ");
			bch2_bpos_to_text(&buf, path->pos);
			prt_str(&buf, " within parent node ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));

			bch2_fs_fatal_error(c, "%s", buf.buf);
			printbuf_exit(&buf);
			ret = -BCH_ERR_btree_need_topology_repair;
			goto err;
		}

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);

		if ((flags & BTREE_ITER_prefetch) &&
		    c->opts.btree_node_prefetch) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level,
			       (enum btree_node_locked_type) lock_type);
	path->level = level;
	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	unsigned i;
	int ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path, i)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);
	cond_resched();
	trans_set_locked(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		btree_path_idx_t idx = trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
			__btree_path_get(trans, &trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
			__btree_path_put(trans, &trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
		} else {
			i++;
		}
	}

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
err:
	bch2_btree_cache_cannibalize_unlock(trans);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);
	return ret;
}
static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
{
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);
}
static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}
static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
							 int check_pos)
{
	unsigned i, l = path->level;
again:
	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);
			goto again;
		}

	return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 btree_path_idx_t path_idx,
				 unsigned flags,
				 unsigned long trace_ip)
{
	struct btree_path *path = &trans->paths[path_idx];
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	if (unlikely(ret))
		goto out;

	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

	trace_btree_path_traverse_start(trans, path);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	path = &trans->paths[path_idx];

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out_uptodate;

	path->level = btree_path_up_until_good_node(trans, path, 0);
	unsigned max_level = path->level;

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}

	if (unlikely(max_level > path->level)) {
		struct btree_path *linked;
		unsigned iter;

		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
			for (unsigned j = path->level + 1; j < max_level; j++)
				linked->l[j] = path->l[j];
	}

out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
	trace_btree_path_traverse_end(trans, path);
out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}
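
/*
 * Caller-side sketch (illustrative, using the standard bcachefs restart
 * idiom): a transaction restart error from traversal must unwind to
 * bch2_trans_begin() before retrying:
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = bch2_btree_path_traverse_one(trans, path_idx, 0, _THIS_IP_);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */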
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);
	}
}
static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
					 bool intent, unsigned long ip)
{
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
	__btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
	trans->paths[new].ip_allocated = ip;
#endif
	return new;
}
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	struct btree_path *old = trans->paths + path;
	__btree_path_put(trans, trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
	trace_btree_path_clone(trans, old, trans->paths + path);
	trans->paths[path].preserve = false;
	return path;
}
btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  btree_path_idx_t path_idx, struct bpos new_pos,
			  bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);

	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;
	path->pos		= new_pos;
	trans->paths_sorted	= false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	unsigned level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the first non
		 * whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);
	}

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path_idx;
}
/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}
static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
	__bch2_btree_path_unlock(trans, trans->paths + path);
	btree_path_list_remove(trans, trans->paths + path);
	__clear_bit(path, trans->paths_allocated);
}
static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
	unsigned l = path->level;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!is_btree_node(path, l))
			return false;

		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
			return false;

		l++;
	} while (l < path->locks_want);

	return true;
}
void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
	struct btree_path *path = trans->paths + path_idx, *dup;

	if (!__btree_path_put(trans, path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	trace_btree_path_free(trans, path_idx, dup);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked && !trans->restarted) {
		if (!dup)
			return;

		if (!(trans->locked
		      ? bch2_btree_path_relock_norestart(trans, dup)
		      : bch2_btree_path_can_relock(trans, dup)))
			return;
	}

	if (dup) {
		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;
	}

	__bch2_path_free(trans, path_idx);
}
static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
{
	if (!__btree_path_put(trans, trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);
}
void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
{
	panic("trans should be locked, unlocked by %pS\n",
	      (void *) trans->last_unlock_ip);
}
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
		   trans->nr_updates, trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
			   bch2_btree_id_str(i->btree_id),
			   i->cached,
			   (void *) i->ip_allocated);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}
noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}
static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   path->cached ? 'C' : 'B',
		   bch2_btree_id_str(path->btree_id),
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	if (!path->cached && btree_node_locked(path, path->level)) {
		prt_char(out, ' ');
		struct btree *b = path_l(path)->b;
		bch2_bpos_to_text(out, b->data->min_key);
		prt_char(out, '-');
		bch2_bpos_to_text(out, b->key.k.p);
	}

#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}
static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
	switch (t) {
	case BTREE_NODE_UNLOCKED:
		return "unlocked";
	case BTREE_NODE_READ_LOCKED:
		return "read";
	case BTREE_NODE_INTENT_LOCKED:
		return "intent";
	case BTREE_NODE_WRITE_LOCKED:
		return "write";
	default:
		return NULL;
	}
}
void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	bch2_btree_path_to_text_short(out, trans, path_idx);

	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		prt_printf(out, "l=%u locks %s seq %u node ", l,
			   btree_node_locked_str(btree_node_locked_type(path, l)),
			   path->l[l].lock_seq);

		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
		if (ret)
			prt_str(out, bch2_err_str(ret));
		else
			prt_printf(out, "%px", path->l[l].b);
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}
static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter) {
		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
		prt_newline(out);
	}
}

noinline __cold
void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	__bch2_trans_paths_to_text(out, trans, false);
}
static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	__bch2_dump_trans_paths_updates(trans, false);
}
noinline __cold
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;
	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (nr > s->nr_max_paths) {
			s->nr_max_paths = nr;
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);

	trans->nr_paths_max = nr;
}
int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (trace_trans_restart_too_many_iters_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_trans_paths_to_text(&buf, trans);
		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_too_many_iters);

	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}
static noinline void btree_path_overflow(struct btree_trans *trans)
{
	bch2_dump_trans_paths_updates(trans);
	bch_err(trans->c, "trans path overflow");
}
static noinline void btree_paths_realloc(struct btree_trans *trans)
{
	unsigned nr = trans->nr_paths * 2;

	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			   sizeof(struct btree_trans_paths) +
			   nr * sizeof(struct btree_path) +
			   nr * sizeof(btree_path_idx_t) + 8 +
			   nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);

	unsigned long *paths_allocated = p;
	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	p += sizeof(struct btree_trans_paths);
	struct btree_path *paths = p;
	*trans_paths_nr(paths) = nr;
	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
	p += nr * sizeof(struct btree_path);

	btree_path_idx_t *sorted = p;
	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
	p += nr * sizeof(btree_path_idx_t) + 8;

	struct btree_insert_entry *updates = p;
	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));

	unsigned long *old = trans->paths_allocated;

	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
	rcu_assign_pointer(trans->paths,		paths);
	rcu_assign_pointer(trans->sorted,		sorted);
	rcu_assign_pointer(trans->updates,		updates);

	trans->nr_paths = nr;

	if (old != trans->_paths_allocated)
		kfree_rcu_mightsleep(old);
}
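
/*
 * Layout of the single allocation above (illustrative):
 *
 *	paths_allocated bitmap | struct btree_trans_paths | paths[nr] |
 *	sorted[nr] (+ 8 bytes of padding) | updates[nr]
 *
 * The new tables are published with rcu_assign_pointer() and the old arrays
 * freed via kfree_rcu_mightsleep(), since other threads may walk a
 * transaction's paths under RCU (a sketch of the rationale, not a guarantee
 * from this excerpt).
 */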
static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
						btree_path_idx_t pos)
{
	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

	if (unlikely(idx == trans->nr_paths)) {
		if (trans->nr_paths == BTREE_ITER_MAX) {
			btree_path_overflow(trans);
			return 0;
		}

		btree_paths_realloc(trans);
	}

	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_paths_max))
		bch2_trans_update_max_paths(trans);

	__set_bit(idx, trans->paths_allocated);

	struct btree_path *path = &trans->paths[idx];
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, idx);
	trans->paths_sorted = false;
	return idx;
}
btree_path_idx_t bch2_path_get(struct btree_trans *trans,
			       enum btree_id btree_id, struct bpos pos,
			       unsigned locks_want, unsigned level,
			       unsigned flags, unsigned long ip)
{
	struct btree_path *path;
	bool cached = flags & BTREE_ITER_cached;
	bool intent = flags & BTREE_ITER_intent;
	struct trans_for_each_path_inorder_iter iter;
	btree_path_idx_t path_pos = 0, path_idx;

	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_locks(trans);

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (__btree_path_cmp(path,
				     btree_id,
				     cached,
				     pos,
				     level) > 0)
			break;

		path_pos = iter.path_idx;
	}

	if (path_pos &&
	    trans->paths[path_pos].cached	== cached &&
	    trans->paths[path_pos].btree_id	== btree_id &&
	    trans->paths[path_pos].level	== level) {
		trace_btree_path_get(trans, trans->paths + path_pos, &pos);

		__btree_path_get(trans, trans->paths + path_pos, intent);
		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = trans->paths + path_idx;
	} else {
		path_idx = btree_path_alloc(trans, path_pos);
		path = trans->paths + path_idx;

		__btree_path_get(trans, path, intent);
		path->pos			= pos;
		path->btree_id			= btree_id;
		path->cached			= cached;
		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked		= false;
		path->level			= level;
		path->locks_want		= locks_want;
		path->nodes_locked		= 0;
		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
		path->ip_allocated		= ip;
#endif
		trans->paths_sorted		= false;

		trace_btree_path_alloc(trans, path);
	}

	if (!(flags & BTREE_ITER_nopreserve))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);

	return path_idx;
}
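
/*
 * Example usage (illustrative sketch, assuming the path APIs declared in
 * btree_iter.h): grab a path, traverse it, then drop the reference:
 *
 *	btree_path_idx_t idx = bch2_path_get(trans, BTREE_ID_inodes, pos,
 *					     1, 0, BTREE_ITER_intent, _THIS_IP_);
 *	int ret = bch2_btree_path_traverse(trans, idx, 0);
 *	if (!ret)
 *		... use trans->paths + idx ...
 *	bch2_path_put(trans, idx, true);
 */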
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
					    enum btree_id btree_id,
					    unsigned level,
					    struct bpos pos)
{
	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_intent, _RET_IP_);
	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

	struct btree_path *path = trans->paths + path_idx;
	bch2_btree_path_downgrade(trans, path);
	__bch2_btree_path_unlock(trans, path);
	return path_idx;
}
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{

	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;
	struct bkey_s_c k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
			goto hole;
	} else {
		struct bkey_cached *ck = (void *) path->l[0].b;
		if (!ck)
			return bkey_s_c_null;

		EBUG_ON(path->btree_id != ck->key.btree_id ||
			!bkey_eq(path->pos, ck->key.pos));

		*u = ck->k->k;
		k = bkey_i_to_s_c(ck->k);
	}

	return k;
hole:
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}
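
/*
 * Caller pattern (illustrative): on a hole, @u has been initialized to a
 * deleted key at path->pos and k.k points at it, so callers can tell a hole
 * from a real key without a separate flag:
 *
 *	struct bkey u;
 *	struct bkey_s_c k = bch2_btree_path_peek_slot(path, &u);
 *	if (k.k && bkey_deleted(k.k))
 *		... hole at path->pos ...
 */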
void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	if (!iter->path || trans->restarted)
		return;

	struct btree_path *path = btree_iter_path(trans, iter);
	path->preserve		= false;
	if (path->ref == 1)
		path->should_be_locked	= false;
}
/* Btree iterators: */

int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	bch2_trans_verify_not_unlocked(trans);

	iter->path = bch2_btree_path_set_pos(trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
	if (ret)
		return ret;

	struct btree_path *path = btree_iter_path(trans, iter);
	if (btree_path_node(path, path->level))
		btree_path_set_should_be_locked(trans, path);
	return 0;
}
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);
	b = btree_path_node(path, path->level);
	if (!b)
		goto out;

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
/* Only kept for -tools */
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(iter->trans);

	return b;
}
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_trans_verify_not_in_restart(trans);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))
		return NULL;

	/* got to end? */
	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);
		return NULL;
	}

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		goto err;
	}

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
	} else {
		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, path->level + 1);

		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		iter->path = bch2_btree_path_set_pos(trans, iter->path,
					bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		path = btree_iter_path(trans, iter);
		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (ret)
			goto err;

		path = btree_iter_path(trans, iter);
		b = path->l[path->level].b;
	}

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, SPOS_MAX)
		     : bkey_eq(pos, SPOS_MAX));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}
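
/*
 * Example loop (illustrative sketch; real callers normally use the
 * for_each_btree_key*() macros from btree_iter.h, which also handle
 * transaction restarts):
 *
 *	for (k = bch2_btree_iter_peek(iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_next(iter))
 *		...
 *
 * bch2_btree_iter_next() is advance + peek; advance returns false once the
 * position can't move forward any further, ending the iteration.
 */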
static noinline
void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_le(i->k->k.p, iter->pos) &&
		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bpos end = path_l(path)->b->key.k.p;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_ge(i->k->k.p, path->pos) &&
		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_eq(i->k->k.p, iter->pos)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}
static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
					      struct btree_iter *iter,
					      struct bpos end_pos)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
					   path->level,
					   path->pos,
					   end_pos,
					   &iter->journal_idx);
}
static noinline
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);

	if (k) {
		iter->k = k->k;
		return bkey_i_to_s_c(k);
	} else {
		return bkey_s_c_null;
	}
}
static noinline
struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bkey_s_c k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter,
				k.k ? k.k->p : path_l(path)->b->key.k.p);

	if (next_journal) {
		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);
	}

	return k;
}
/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
static noinline
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k;
	int ret;

	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_not_unlocked(trans);

	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_intent, 0,
						     iter->flags|BTREE_ITER_cached|
						     BTREE_ITER_cached_nofill,
						     _THIS_IP_);

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
					 iter->flags|BTREE_ITER_cached) ?:
		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);

	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
	if (k.k && !bkey_err(k)) {
		iter->k = u;
		k.k = &iter->k;
	}
	return k;
}
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k, k2;
	int ret;

	EBUG_ON(btree_iter_path(trans, iter)->cached);
	bch2_btree_iter_verify(iter);

	while (1) {
		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}

		struct btree_path *path = btree_iter_path(trans, iter);
		l = path_l(path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}

		btree_path_set_should_be_locked(trans, path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
		    k.k &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
			k = k2;
			ret = bkey_err(k);
			if (ret) {
				bch2_btree_iter_set_pos(iter, iter->pos);
				goto out;
			}
		}

		if (unlikely(iter->flags & BTREE_ITER_with_journal))
			k = btree_trans_peek_journal(trans, iter, k);

		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_updates(trans, iter, &k);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non-deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);
			continue;
		}

		if (likely(k.k)) {
			break;
		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
		} else {
			/* End of btree: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}
	}
out:
	bch2_btree_iter_verify(iter);

	return k;
}
2270 * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2271 * iterator's current position
2272 * @iter: iterator to peek from
2273 * @end: search limit: returns keys less than or equal to @end
2275 * Returns: key if found, or an error extractable with bkey_err().
2277 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2279 struct btree_trans *trans = iter->trans;
2280 struct bpos search_key = btree_iter_search_key(iter);
2282 struct bpos iter_pos;
2285 bch2_trans_verify_not_unlocked(trans);
2286 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
2288 if (iter->update_path) {
2289 bch2_path_put_nokeep(trans, iter->update_path,
2290 iter->flags & BTREE_ITER_intent);
2291 iter->update_path = 0;
2294 bch2_btree_iter_verify_entry_exit(iter);
2297 k = __bch2_btree_iter_peek(iter, search_key);
2300 if (unlikely(bkey_err(k)))
2304 * We need to check against @end before FILTER_SNAPSHOTS because
2305 * if we get to a different inode that requested we might be
2306 * seeing keys for a different snapshot tree that will all be
2309 * But we can't do the full check here, because bkey_start_pos()
2310 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2311 * that's what we check against in extents mode:
2313 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2314 ? bkey_gt(k.k->p, end)
2315 : k.k->p.inode > end.inode))
2318 if (iter->update_path &&
2319 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2320 bch2_path_put_nokeep(trans, iter->update_path,
2321 iter->flags & BTREE_ITER_intent);
2322 iter->update_path = 0;
2325 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2326 (iter->flags & BTREE_ITER_intent) &&
2327 !(iter->flags & BTREE_ITER_is_extents) &&
2328 !iter->update_path) {
2329 struct bpos pos = k.k->p;
2331 if (pos.snapshot < iter->snapshot) {
2332 search_key = bpos_successor(k.k->p);
2336 pos.snapshot = iter->snapshot;
2339 * advance, same as on exit for iter->path, but only up
2342 __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
2343 iter->update_path = iter->path;
2345 iter->update_path = bch2_btree_path_set_pos(trans,
2346 iter->update_path, pos,
2347 iter->flags & BTREE_ITER_intent,
2349 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2350 if (unlikely(ret)) {
2351 k = bkey_s_c_err(ret);
2357 * We can never have a key in a leaf node at POS_MAX, so
2358 * we don't have to check these successor() calls:
2360 if ((iter->flags & BTREE_ITER_filter_snapshots) &&
2361 !bch2_snapshot_is_ancestor(trans->c,
2364 search_key = bpos_successor(k.k->p);
2368 if (bkey_whiteout(k.k) &&
2369 !(iter->flags & BTREE_ITER_all_snapshots)) {
2370 search_key = bkey_successor(iter, k.k->p);
2375 * iter->pos should be monotonically increasing, and always be
2376 * equal to the key we just returned - except extents can
2377 * straddle iter->pos:
2379 if (!(iter->flags & BTREE_ITER_is_extents))
2382 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2384 if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
2385 ? bkey_gt(iter_pos, end)
2386 : bkey_ge(iter_pos, end)))
2392 iter->pos = iter_pos;
2394 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2395 iter->flags & BTREE_ITER_intent,
2396 btree_iter_ip_allocated(iter));
2398 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2400 if (iter->update_path) {
2401 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2403 k = bkey_s_c_err(ret);
2405 btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
2408 if (!(iter->flags & BTREE_ITER_all_snapshots))
2409 iter->pos.snapshot = iter->snapshot;
2411 ret = bch2_btree_iter_verify_ret(iter, k);
2412 if (unlikely(ret)) {
2413 bch2_btree_iter_set_pos(iter, iter->pos);
2414 k = bkey_s_c_err(ret);
2417 bch2_btree_iter_verify_entry_exit(iter);
2421 bch2_btree_iter_set_pos(iter, end);
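/*
 * Illustrative sketch (not part of this file): a bounded forward scan using
 * bch2_btree_iter_peek_upto(). example_peek_upto_scan() and its btree id and
 * positions are arbitrary, snapshot handling is ignored, and transaction
 * restart handling is omitted - a real caller would wrap this in a
 * bch2_trans_begin() retry loop:
 */
static int __maybe_unused example_peek_upto_scan(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, POS(inum, 0), 0);

	while ((k = bch2_btree_iter_peek_upto(&iter, POS(inum, U64_MAX))).k &&
	       !(ret = bkey_err(k))) {
		/* ... process k, which lies in [POS(inum, 0), POS(inum, U64_MAX)] ... */
		bch2_btree_iter_advance(&iter);
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}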
2427 * bch2_btree_iter_next() - returns first key greater than iterator's current position
2429 * @iter: iterator to peek from
2431 * Returns: key if found, or an error extractable with bkey_err().
2433 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2435 if (!bch2_btree_iter_advance(iter))
2436 return bkey_s_c_null;
2438 return bch2_btree_iter_peek(iter);
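/*
 * Illustrative sketch (not part of this file): since bch2_btree_iter_next()
 * is just advance-then-peek, the classic unbounded cursor loop looks like
 * this. example_forward_scan() and its btree id are arbitrary; restart
 * handling is again omitted:
 */
static int __maybe_unused example_forward_scan(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);

	for (k = bch2_btree_iter_peek(&iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next(&iter))
		; /* process k */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}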
2442 * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2443 * iterator's current position
2444 * @iter: iterator to peek from
2446 * Returns: key if found, or an error extractable with bkey_err().
2448 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2450 struct btree_trans *trans = iter->trans;
2451 struct bpos search_key = iter->pos;
2453 struct bkey saved_k;
2454 const struct bch_val *saved_v;
2455 btree_path_idx_t saved_path = 0;
2458 bch2_trans_verify_not_unlocked(trans);
2459 EBUG_ON(btree_iter_path(trans, iter)->cached ||
2460 btree_iter_path(trans, iter)->level);
2462 if (iter->flags & BTREE_ITER_with_journal)
2463 return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2465 bch2_btree_iter_verify(iter);
2466 bch2_btree_iter_verify_entry_exit(iter);
2468 if (iter->flags & BTREE_ITER_filter_snapshots)
2469 search_key.snapshot = U32_MAX;
2472 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2473 iter->flags & BTREE_ITER_intent,
2474 btree_iter_ip_allocated(iter));
2476 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2477 if (unlikely(ret)) {
2478 /* ensure that iter->k is consistent with iter->pos: */
2479 bch2_btree_iter_set_pos(iter, iter->pos);
2480 k = bkey_s_c_err(ret);
2484 struct btree_path *path = btree_iter_path(trans, iter);
2486 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2488 ((iter->flags & BTREE_ITER_is_extents)
2489 ? bpos_ge(bkey_start_pos(k.k), search_key)
2490 : bpos_gt(k.k->p, search_key)))
2491 k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2493 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2495 bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2498 if (iter->flags & BTREE_ITER_filter_snapshots) {
2499 if (k.k->p.snapshot == iter->snapshot)
2503 * If we have a saved candidate, and we're no
2504 * longer at the same _key_ (not pos), return that candidate
2507 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2508 bch2_path_put_nokeep(trans, iter->path,
2509 iter->flags & BTREE_ITER_intent);
2510 iter->path = saved_path;
2517 if (bch2_snapshot_is_ancestor(trans->c,
2521 bch2_path_put_nokeep(trans, saved_path,
2522 iter->flags & BTREE_ITER_intent);
2523 saved_path = btree_path_clone(trans, iter->path,
2524 iter->flags & BTREE_ITER_intent,
2526 path = btree_iter_path(trans, iter);
2527 trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
2532 search_key = bpos_predecessor(k.k->p);
2536 if (bkey_whiteout(k.k) &&
2537 !(iter->flags & BTREE_ITER_all_snapshots)) {
2538 search_key = bkey_predecessor(iter, k.k->p);
2539 if (iter->flags & BTREE_ITER_filter_snapshots)
2540 search_key.snapshot = U32_MAX;
2544 btree_path_set_should_be_locked(trans, path);
2546 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2547 /* Advance to previous leaf node: */
2548 search_key = bpos_predecessor(path->l[0].b->data->min_key);
2550 /* Start of btree: */
2551 bch2_btree_iter_set_pos(iter, POS_MIN);
2557 EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2559 /* Extents can straddle iter->pos: */
2560 if (bkey_lt(k.k->p, iter->pos))
2563 if (iter->flags & BTREE_ITER_filter_snapshots)
2564 iter->pos.snapshot = iter->snapshot;
2567 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
2569 bch2_btree_iter_verify_entry_exit(iter);
2570 bch2_btree_iter_verify(iter);
2576 * bch2_btree_iter_prev() - returns first key less than iterator's current position
2578 * @iter: iterator to peek from
2580 * Returns: key if found, or an error extractable with bkey_err().
2582 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2584 if (!bch2_btree_iter_rewind(iter))
2585 return bkey_s_c_null;
2587 return bch2_btree_iter_peek_prev(iter);
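/*
 * Illustrative sketch (not part of this file): walking keys in reverse with
 * bch2_btree_iter_peek_prev()/bch2_btree_iter_prev(). Note the checks above:
 * this path doesn't support cached or node iterators, and journal overlay
 * (BTREE_ITER_with_journal) is explicitly unsupported. Names are arbitrary;
 * restart handling omitted:
 */
static int __maybe_unused example_reverse_scan(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, POS_MAX, 0);

	for (k = bch2_btree_iter_peek_prev(&iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_prev(&iter))
		; /* process k, highest position first */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}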
2590 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2592 struct btree_trans *trans = iter->trans;
2593 struct bpos search_key;
2597 bch2_trans_verify_not_unlocked(trans);
2598 bch2_btree_iter_verify(iter);
2599 bch2_btree_iter_verify_entry_exit(iter);
2600 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
2602 /* extents can't span inode numbers: */
2603 if ((iter->flags & BTREE_ITER_is_extents) &&
2604 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2605 if (iter->pos.inode == KEY_INODE_MAX)
2606 return bkey_s_c_null;
2608 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2611 search_key = btree_iter_search_key(iter);
2612 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2613 iter->flags & BTREE_ITER_intent,
2614 btree_iter_ip_allocated(iter));
2616 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2617 if (unlikely(ret)) {
2618 k = bkey_s_c_err(ret);
2622 if ((iter->flags & BTREE_ITER_cached) ||
2623 !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
2626 if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
2627 trans->nr_updates)) {
2628 bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2633 if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
2634 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2637 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
2638 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2641 /* We're not returning a key from iter->path: */
2645 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2650 struct bpos end = iter->pos;
2652 if (iter->flags & BTREE_ITER_is_extents)
2653 end.offset = U64_MAX;
2655 EBUG_ON(btree_iter_path(trans, iter)->level);
2657 if (iter->flags & BTREE_ITER_intent) {
2658 struct btree_iter iter2;
2660 bch2_trans_copy_iter(&iter2, iter);
2661 k = bch2_btree_iter_peek_upto(&iter2, end);
2663 if (k.k && !bkey_err(k)) {
2664 swap(iter->key_cache_path, iter2.key_cache_path);
2668 bch2_trans_iter_exit(trans, &iter2);
2670 struct bpos pos = iter->pos;
2672 k = bch2_btree_iter_peek_upto(iter, end);
2673 if (unlikely(bkey_err(k)))
2674 bch2_btree_iter_set_pos(iter, pos);
2679 if (unlikely(bkey_err(k)))
2682 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2684 if (bkey_lt(iter->pos, next)) {
2685 bkey_init(&iter->k);
2686 iter->k.p = iter->pos;
2688 if (iter->flags & BTREE_ITER_is_extents) {
2689 bch2_key_resize(&iter->k,
2690 min_t(u64, KEY_SIZE_MAX,
2691 (next.inode == iter->pos.inode
2695 EBUG_ON(!iter->k.size);
2698 k = (struct bkey_s_c) { &iter->k, NULL };
2702 btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2704 bch2_btree_iter_verify_entry_exit(iter);
2705 bch2_btree_iter_verify(iter);
2706 ret = bch2_btree_iter_verify_ret(iter, k);
2708 return bkey_s_c_err(ret);
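/*
 * Illustrative sketch (not part of this file): a point lookup with
 * bch2_btree_iter_peek_slot(). Unlike peek(), a slot read always produces a
 * key for the iterator's position - when nothing is stored there, iter->k is
 * synthesized as a deleted key (or, in extents mode, a hole extent), so
 * callers distinguish the two with bkey_deleted()/k.k->type. Names here are
 * arbitrary; restart handling omitted:
 */
static int __maybe_unused example_point_lookup(struct btree_trans *trans,
					       enum btree_id btree, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos, 0);

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (!ret && bkey_deleted(k.k))
		ret = -ENOENT; /* a hole: nothing stored at @pos */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}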
2713 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2715 if (!bch2_btree_iter_advance(iter))
2716 return bkey_s_c_null;
2718 return bch2_btree_iter_peek_slot(iter);
2721 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2723 if (!bch2_btree_iter_rewind(iter))
2724 return bkey_s_c_null;
2726 return bch2_btree_iter_peek_slot(iter);
2729 /* Obsolete, but still used by rust wrapper in -tools */
2730 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2734 while (btree_trans_too_many_iters(iter->trans) ||
2735 (k = bch2_btree_iter_peek_type(iter, iter->flags),
2736 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2737 bch2_trans_begin(iter->trans);
2742 /* new transactional stuff: */
2744 #ifdef CONFIG_BCACHEFS_DEBUG
2745 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2747 struct btree_path *path;
2750 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2752 trans_for_each_path(trans, path, i) {
2753 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2754 BUG_ON(trans->sorted[path->sorted_idx] != i);
2757 for (i = 0; i < trans->nr_sorted; i++) {
2758 unsigned idx = trans->sorted[i];
2760 BUG_ON(!test_bit(idx, trans->paths_allocated));
2761 BUG_ON(trans->paths[idx].sorted_idx != i);
2765 static void btree_trans_verify_sorted(struct btree_trans *trans)
2767 struct btree_path *path, *prev = NULL;
2768 struct trans_for_each_path_inorder_iter iter;
2770 if (!bch2_debug_check_iterators)
2773 trans_for_each_path_inorder(trans, path, iter) {
2774 if (prev && btree_path_cmp(prev, path) > 0) {
2775 __bch2_dump_trans_paths_updates(trans, true);
2776 panic("trans paths out of order!\n");
2782 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2783 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2786 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2788 int i, l = 0, r = trans->nr_sorted, inc = 1;
2791 btree_trans_verify_sorted_refs(trans);
2793 if (trans->paths_sorted)
2797 * Cocktail shaker sort: this is efficient because iterators will be mostly sorted.
2803 for (i = inc > 0 ? l : r - 2;
2804 i + 1 < r && i >= l;
2806 if (btree_path_cmp(trans->paths + trans->sorted[i],
2807 trans->paths + trans->sorted[i + 1]) > 0) {
2808 swap(trans->sorted[i], trans->sorted[i + 1]);
2809 trans->paths[trans->sorted[i]].sorted_idx = i;
2810 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2822 trans->paths_sorted = true;
2824 btree_trans_verify_sorted(trans);
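/*
 * Illustrative sketch of the same algorithm on a plain array, to make the
 * shape of the cocktail shaker sort above easier to see: alternate forward
 * and backward bubble passes, shrinking the unsorted window from whichever
 * end the previous pass just cleaned up:
 */
static void __maybe_unused example_cocktail_sort(int *a, int n)
{
	int i, l = 0, r = n, inc = 1;
	bool swapped;

	do {
		swapped = false;

		for (i = inc > 0 ? l : r - 2;
		     i + 1 < r && i >= l;
		     i += inc)
			if (a[i] > a[i + 1]) {
				swap(a[i], a[i + 1]);
				swapped = true;
			}

		if (inc > 0)
			--r;	/* forward pass bubbled the max to a[r - 1] */
		else
			++l;	/* backward pass bubbled the min to a[l] */
		inc = -inc;
	} while (swapped);
}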
2827 static inline void btree_path_list_remove(struct btree_trans *trans,
2828 struct btree_path *path)
2830 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2831 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2833 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2834 trans->sorted + path->sorted_idx + 1,
2835 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2836 sizeof(u64) / sizeof(btree_path_idx_t)));
2838 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2840 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2841 trans->paths[trans->sorted[i]].sorted_idx = i;
2844 static inline void btree_path_list_add(struct btree_trans *trans,
2845 btree_path_idx_t pos,
2846 btree_path_idx_t path_idx)
2848 struct btree_path *path = trans->paths + path_idx;
2850 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2852 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2853 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2854 trans->sorted + path->sorted_idx,
2855 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2856 sizeof(u64) / sizeof(btree_path_idx_t)));
2858 trans->sorted[path->sorted_idx] = path_idx;
2860 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2863 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2864 trans->paths[trans->sorted[i]].sorted_idx = i;
2866 btree_trans_verify_sorted_refs(trans);
2869 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2871 if (iter->update_path)
2872 bch2_path_put_nokeep(trans, iter->update_path,
2873 iter->flags & BTREE_ITER_intent);
2875 bch2_path_put(trans, iter->path,
2876 iter->flags & BTREE_ITER_intent);
2877 if (iter->key_cache_path)
2878 bch2_path_put(trans, iter->key_cache_path,
2879 iter->flags & BTREE_ITER_intent);
2881 iter->update_path = 0;
2882 iter->key_cache_path = 0;
2886 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2887 struct btree_iter *iter,
2888 enum btree_id btree_id, struct bpos pos,
2891 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2892 bch2_btree_iter_flags(trans, btree_id, flags),
2896 void bch2_trans_node_iter_init(struct btree_trans *trans,
2897 struct btree_iter *iter,
2898 enum btree_id btree_id,
2900 unsigned locks_want,
2904 flags |= BTREE_ITER_not_extents;
2905 flags |= BTREE_ITER_snapshot_field;
2906 flags |= BTREE_ITER_all_snapshots;
2908 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2909 __bch2_btree_iter_flags(trans, btree_id, flags),
2912 iter->min_depth = depth;
2914 struct btree_path *path = btree_iter_path(trans, iter);
2915 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2916 BUG_ON(path->level != depth);
2917 BUG_ON(iter->min_depth != depth);
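/*
 * Illustrative sketch (not part of this file): walking btree nodes at a given
 * level with a node iterator, as initialized above. bch2_btree_iter_peek_node()
 * and bch2_btree_iter_next_node() are the node-level analogues of peek()/next();
 * restart handling is again omitted:
 */
static int __maybe_unused example_walk_nodes(struct btree_trans *trans,
					     enum btree_id btree, unsigned level)
{
	struct btree_iter iter;
	struct btree *b;
	int ret = 0;

	bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level, 0);

	for (b = bch2_btree_iter_peek_node(&iter);
	     b && !(ret = PTR_ERR_OR_ZERO(b));
	     b = bch2_btree_iter_next_node(&iter))
		; /* inspect b */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}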
2920 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2922 struct btree_trans *trans = src->trans;
2925 #ifdef TRACK_PATH_ALLOCATED
2926 dst->ip_allocated = _RET_IP_;
2929 __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
2930 if (src->update_path)
2931 __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
2932 dst->key_cache_path = 0;
2935 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2937 struct bch_fs *c = trans->c;
2938 unsigned new_top = trans->mem_top + size;
2939 unsigned old_bytes = trans->mem_bytes;
2940 unsigned new_bytes = roundup_pow_of_two(new_top);
2945 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2947 struct btree_transaction_stats *s = btree_trans_stats(trans);
2948 s->max_mem = max(s->max_mem, new_bytes);
2950 if (trans->used_mempool) {
2951 if (trans->mem_bytes >= new_bytes)
2952 goto out_change_top;
2954 /* No more space in the mempool item; need to kmalloc a new, larger buffer */
2955 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2956 if (unlikely(!new_mem)) {
2957 bch2_trans_unlock(trans);
2959 new_mem = kmalloc(new_bytes, GFP_KERNEL);
2961 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2963 ret = bch2_trans_relock(trans);
2966 return ERR_PTR(ret);
2969 memcpy(new_mem, trans->mem, trans->mem_top);
2970 trans->used_mempool = false;
2971 mempool_free(trans->mem, &c->btree_trans_mem_pool);
2975 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2976 if (unlikely(!new_mem)) {
2977 bch2_trans_unlock(trans);
2979 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2980 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2981 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2982 new_bytes = BTREE_TRANS_MEM_MAX;
2983 memcpy(new_mem, trans->mem, trans->mem_top);
2984 trans->used_mempool = true;
2989 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2991 trans->mem = new_mem;
2992 trans->mem_bytes = new_bytes;
2994 ret = bch2_trans_relock(trans);
2996 return ERR_PTR(ret);
2999 trans->mem = new_mem;
3000 trans->mem_bytes = new_bytes;
3003 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3004 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
3007 p = trans->mem + trans->mem_top;
3008 trans->mem_top += size;
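/*
 * Illustrative sketch (not part of this file): allocating scratch memory that
 * lives until the transaction is reset, via the bch2_trans_kmalloc() inline
 * wrapper around the function above. The allocation may fail with an error
 * pointer - including a transaction restart when the buffer had to be
 * reallocated, invalidating previously returned pointers:
 */
static int __maybe_unused example_trans_scratch(struct btree_trans *trans)
{
	struct bkey_i *scratch = bch2_trans_kmalloc(trans, sizeof(*scratch));
	int ret = PTR_ERR_OR_ZERO(scratch);

	if (ret)
		return ret;

	bkey_init(&scratch->k);
	/* ... fill in and use scratch; it is freed implicitly at trans reset ... */
	return 0;
}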
3013 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3015 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3016 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3017 (jiffies - trans->srcu_lock_time) / HZ);
3020 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3022 if (trans->srcu_held) {
3023 struct bch_fs *c = trans->c;
3024 struct btree_path *path;
3027 trans_for_each_path(trans, path, i)
3028 if (path->cached && !btree_node_locked(path, 0))
3029 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3031 check_srcu_held_too_long(trans);
3032 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3033 trans->srcu_held = false;
3037 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3039 if (!trans->srcu_held) {
3040 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3041 trans->srcu_lock_time = jiffies;
3042 trans->srcu_held = true;
3047 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3048 * @trans: transaction to reset
3050 * Returns: current restart counter, to be used with trans_was_restarted()
3052 * While iterating over nodes or updating nodes, an attempt to lock a btree node
3053 * may return BCH_ERR_transaction_restart when the trylock fails. When this
3054 * occurs, bch2_trans_begin() should be called and the transaction retried.
3056 u32 bch2_trans_begin(struct btree_trans *trans)
3058 struct btree_path *path;
3062 bch2_trans_reset_updates(trans);
3064 trans->restart_count++;
3066 trans->journal_entries = NULL;
3068 trans_for_each_path(trans, path, i) {
3069 path->should_be_locked = false;
3072 * If the transaction wasn't restarted, we presume we're doing
3073 * something new: don't keep iterators except the ones that
3074 * are in use - with an additional exception for the subvolumes btree:
3076 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3077 path->preserve = false;
3080 * XXX: we probably shouldn't be doing this if the transaction
3081 * was restarted, but currently we still overflow transaction
3082 * iterators if we do that
3084 if (!path->ref && !path->preserve)
3085 __bch2_path_free(trans, i);
3087 path->preserve = false;
3090 now = local_clock();
3092 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3093 time_after64(now, trans->last_begin_time + 10))
3094 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3095 trans->last_begin_time, now);
3097 if (!trans->restarted &&
3099 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3100 bch2_trans_unlock(trans);
3102 now = local_clock();
3104 trans->last_begin_time = now;
3106 if (unlikely(trans->srcu_held &&
3107 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3108 bch2_trans_srcu_unlock(trans);
3110 trans->last_begin_ip = _RET_IP_;
3112 trans_set_locked(trans);
3114 if (trans->restarted) {
3115 bch2_btree_path_traverse_all(trans);
3116 trans->notrace_relock_fail = false;
3119 bch2_trans_verify_not_unlocked(trans);
3120 return trans->restart_count;
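/*
 * Illustrative sketch (not part of this file): the canonical retry loop
 * around bch2_trans_begin(). Any BCH_ERR_transaction_restart error means
 * "start over from the top"; everything else is returned to the caller.
 * (The lockrestart_do()/commit_do() helpers package this same pattern.)
 */
static int __maybe_unused example_retry_loop(struct btree_trans *trans)
{
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = 0; /* ... btree operations that may return a restart error ... */

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	return ret;
}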
3123 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3125 unsigned bch2_trans_get_fn_idx(const char *fn)
3127 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3128 if (!bch2_btree_transaction_fns[i] ||
3129 bch2_btree_transaction_fns[i] == fn) {
3130 bch2_btree_transaction_fns[i] = fn;
3134 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3138 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3139 __acquires(&c->btree_trans_barrier)
3141 struct btree_trans *trans;
3143 if (IS_ENABLED(__KERNEL__)) {
3144 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3146 memset(trans, 0, offsetof(struct btree_trans, list));
3151 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3152 memset(trans, 0, sizeof(*trans));
3154 seqmutex_lock(&c->btree_trans_lock);
3155 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3156 struct btree_trans *pos;
3157 pid_t pid = current->pid;
3159 trans->locking_wait.task = current;
3161 list_for_each_entry(pos, &c->btree_trans_list, list) {
3162 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3164 * We'd much prefer to be stricter here and completely
3165 * disallow multiple btree_trans in the same thread -
3166 * but the data move path calls bch2_write when we
3167 * already have a btree_trans initialized.
3170 pid == pos_task->pid &&
3175 list_add(&trans->list, &c->btree_trans_list);
3176 seqmutex_unlock(&c->btree_trans_lock);
3179 trans->last_begin_time = local_clock();
3180 trans->fn_idx = fn_idx;
3181 trans->locking_wait.task = current;
3182 trans->journal_replay_not_finished =
3183 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3184 atomic_inc_not_zero(&c->journal_keys.ref);
3185 trans->nr_paths = ARRAY_SIZE(trans->_paths);
3186 trans->paths_allocated = trans->_paths_allocated;
3187 trans->sorted = trans->_sorted;
3188 trans->paths = trans->_paths;
3189 trans->updates = trans->_updates;
3191 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3193 trans->paths_allocated[0] = 1;
3195 static struct lock_class_key lockdep_key;
3196 lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3198 if (fn_idx < BCH_TRANSACTIONS_NR) {
3199 trans->fn = bch2_btree_transaction_fns[fn_idx];
3201 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3204 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3206 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3207 if (likely(trans->mem))
3208 trans->mem_bytes = expected_mem_bytes;
3211 trans->nr_paths_max = s->nr_max_paths;
3212 trans->journal_entries_size = s->journal_entries_size;
3215 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3216 trans->srcu_lock_time = jiffies;
3217 trans->srcu_held = true;
3218 trans_set_locked(trans);
3220 closure_init_stack_release(&trans->ref);
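/*
 * Illustrative sketch (not part of this file): the full transaction
 * lifecycle. bch2_trans_get() (the wrapper around __bch2_trans_get() above)
 * hands out a per-cpu cached or mempool-backed btree_trans; every successful
 * get must be paired with bch2_trans_put():
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);

	/* ... iterators, lookups and updates against @trans go here ... */

	bch2_trans_put(trans);
	return 0;
}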
3224 static void check_btree_paths_leaked(struct btree_trans *trans)
3226 #ifdef CONFIG_BCACHEFS_DEBUG
3227 struct bch_fs *c = trans->c;
3228 struct btree_path *path;
3231 trans_for_each_path(trans, path, i)
3236 bch_err(c, "btree paths leaked from %s!", trans->fn);
3237 trans_for_each_path(trans, path, i)
3239 printk(KERN_ERR " btree %s %pS\n",
3240 bch2_btree_id_str(path->btree_id),
3241 (void *) path->ip_allocated);
3242 /* Be noisy about this: */
3243 bch2_fatal_error(c);
3247 void bch2_trans_put(struct btree_trans *trans)
3248 __releases(&c->btree_trans_barrier)
3250 struct bch_fs *c = trans->c;
3252 bch2_trans_unlock(trans);
3254 trans_for_each_update(trans, i)
3255 __btree_path_put(trans, trans->paths + i->path, true);
3256 trans->nr_updates = 0;
3258 check_btree_paths_leaked(trans);
3260 if (trans->srcu_held) {
3261 check_srcu_held_too_long(trans);
3262 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3265 if (unlikely(trans->journal_replay_not_finished))
3266 bch2_journal_keys_put(c);
3269 * trans->ref protects trans->locking_wait.task, btree_paths array; used by the cycle detector
3272 closure_return_sync(&trans->ref);
3273 trans->locking_wait.task = NULL;
3275 unsigned long *paths_allocated = trans->paths_allocated;
3276 trans->paths_allocated = NULL;
3277 trans->paths = NULL;
3279 if (paths_allocated != trans->_paths_allocated)
3280 kvfree_rcu_mightsleep(paths_allocated);
3282 if (trans->used_mempool)
3283 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3287 /* Userspace doesn't have a real percpu implementation: */
3288 if (IS_ENABLED(__KERNEL__))
3289 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3292 seqmutex_lock(&c->btree_trans_lock);
3293 list_del(&trans->list);
3294 seqmutex_unlock(&c->btree_trans_lock);
3296 mempool_free(trans, &c->btree_trans_pool);
3300 bool bch2_current_has_btree_trans(struct bch_fs *c)
3302 seqmutex_lock(&c->btree_trans_lock);
3303 struct btree_trans *trans;
3305 list_for_each_entry(trans, &c->btree_trans_list, list)
3306 if (trans->locking_wait.task == current &&
3311 seqmutex_unlock(&c->btree_trans_lock);
3315 static void __maybe_unused
3316 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3317 struct btree_bkey_cached_common *b)
3319 struct six_lock_count c = six_lock_counts(&b->lock);
3320 struct task_struct *owner;
3324 owner = READ_ONCE(b->lock.owner);
3325 pid = owner ? owner->pid : 0;
3328 prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3329 b->level, bch2_btree_id_str(b->btree_id));
3330 bch2_bpos_to_text(out, btree_node_pos(b));
3332 prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3333 c.n[0], c.n[1], c.n[2], pid);
3336 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3338 struct btree_bkey_cached_common *b;
3339 static char lock_types[] = { 'r', 'i', 'w' };
3340 struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3343 /* before rcu_read_lock(): */
3344 bch2_printbuf_make_room(out, 4096);
3346 if (!out->nr_tabstops) {
3347 printbuf_tabstop_push(out, 16);
3348 printbuf_tabstop_push(out, 32);
3351 prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3353 /* trans->paths is rcu protected vs. freeing */
3357 struct btree_path *paths = rcu_dereference(trans->paths);
3361 unsigned long *paths_allocated = trans_paths_allocated(paths);
3363 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3364 struct btree_path *path = paths + idx;
3365 if (!path->nodes_locked)
3368 prt_printf(out, " path %u %c l=%u %s:",
3370 path->cached ? 'c' : 'b',
3372 bch2_btree_id_str(path->btree_id));
3373 bch2_bpos_to_text(out, path->pos);
3376 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3377 if (btree_node_locked(path, l) &&
3378 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3379 prt_printf(out, " %c l=%u ",
3380 lock_types[btree_node_locked_type(path, l)], l);
3381 bch2_btree_bkey_cached_common_to_text(out, b);
3387 b = READ_ONCE(trans->locking);
3389 prt_printf(out, " blocked for %lluus on\n",
3390 div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3391 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3392 bch2_btree_bkey_cached_common_to_text(out, b);
3400 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3402 struct btree_transaction_stats *s;
3403 struct btree_trans *trans;
3406 if (c->btree_trans_bufs)
3407 for_each_possible_cpu(cpu) {
3408 struct btree_trans *trans =
3409 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3412 seqmutex_lock(&c->btree_trans_lock);
3413 list_del(&trans->list);
3414 seqmutex_unlock(&c->btree_trans_lock);
3418 free_percpu(c->btree_trans_bufs);
3420 trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3422 panic("%s leaked btree_trans\n", trans->fn);
3424 for (s = c->btree_transaction_stats;
3425 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3427 kfree(s->max_paths_text);
3428 bch2_time_stats_exit(&s->lock_hold_times);
3431 if (c->btree_trans_barrier_initialized) {
3432 synchronize_srcu_expedited(&c->btree_trans_barrier);
3433 cleanup_srcu_struct(&c->btree_trans_barrier);
3435 mempool_exit(&c->btree_trans_mem_pool);
3436 mempool_exit(&c->btree_trans_pool);
3439 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3441 struct btree_transaction_stats *s;
3443 for (s = c->btree_transaction_stats;
3444 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3446 bch2_time_stats_init(&s->duration);
3447 bch2_time_stats_init(&s->lock_hold_times);
3448 mutex_init(&s->lock);
3451 INIT_LIST_HEAD(&c->btree_trans_list);
3452 seqmutex_init(&c->btree_trans_lock);
3455 int bch2_fs_btree_iter_init(struct bch_fs *c)
3459 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3460 if (!c->btree_trans_bufs)
3463 ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3464 sizeof(struct btree_trans)) ?:
3465 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3466 BTREE_TRANS_MEM_MAX) ?:
3467 init_srcu_struct(&c->btree_trans_barrier);
3472 * static annotation (hackily done) for lock ordering of reclaim vs. btree node locks
3475 #ifdef CONFIG_LOCKDEP
3476 fs_reclaim_acquire(GFP_KERNEL);
3477 struct btree_trans *trans = bch2_trans_get(c);
3478 trans_set_locked(trans);
3479 bch2_trans_put(trans);
3480 fs_reclaim_release(GFP_KERNEL);
3483 c->btree_trans_barrier_initialized = true;