1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
20 #include <linux/prefetch.h>
22 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
23 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
26 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
29 * Unlocks before scheduling
30 * Note: does not revalidate iterator
32 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
34 if (need_resched() || race_fault()) {
35 bch2_trans_unlock(trans);
37 return bch2_trans_relock(trans) ? 0 : -EINTR;
43 static inline int __btree_path_cmp(const struct btree_path *l,
44 enum btree_id r_btree_id,
50 * Must match lock ordering as defined by __bch2_btree_node_lock:
52 return cmp_int(l->btree_id, r_btree_id) ?:
53 cmp_int((int) l->cached, (int) r_cached) ?:
54 bpos_cmp(l->pos, r_pos) ?:
55 -cmp_int(l->level, r_level);
58 static inline int btree_path_cmp(const struct btree_path *l,
59 const struct btree_path *r)
61 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
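/*
 * bkey_successor()/bkey_predecessor(): advance or rewind an iterator
 * position. When we aren't iterating over every snapshot, only the
 * inode:offset portion of the position moves and the snapshot field stays
 * pinned to iter->snapshot.
 */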
64 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
66 /* Are we iterating over keys in all snapshots? */
67 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
68 p = bpos_successor(p);
70 p = bpos_nosnap_successor(p);
71 p.snapshot = iter->snapshot;
77 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
79 /* Are we iterating over keys in all snapshots? */
80 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
81 p = bpos_predecessor(p);
83 p = bpos_nosnap_predecessor(p);
84 p.snapshot = iter->snapshot;
90 static inline bool is_btree_node(struct btree_path *path, unsigned l)
92 return l < BTREE_MAX_DEPTH &&
93 (unsigned long) path->l[l].b >= 128;
96 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
98 struct bpos pos = iter->pos;
100 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
101 bkey_cmp(pos, POS_MAX))
102 pos = bkey_successor(iter, pos);
106 static inline bool btree_path_pos_before_node(struct btree_path *path,
109 return bpos_cmp(path->pos, b->data->min_key) < 0;
112 static inline bool btree_path_pos_after_node(struct btree_path *path,
115 return bpos_cmp(b->key.k.p, path->pos) < 0;
118 static inline bool btree_path_pos_in_node(struct btree_path *path,
121 return path->btree_id == b->c.btree_id &&
122 !btree_path_pos_before_node(path, b) &&
123 !btree_path_pos_after_node(path, b);
126 /* Btree node locking: */
128 void bch2_btree_node_unlock_write(struct btree_trans *trans,
129 struct btree_path *path, struct btree *b)
131 bch2_btree_node_unlock_write_inlined(trans, path, b);
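/*
 * Take a write lock on a node we already have intent locked: count and
 * temporarily drop the read locks this transaction holds on the node (a
 * writer can't make progress while readers exist), take the write lock, then
 * restore the reader count.
 */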
134 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
136 struct btree_path *linked;
137 unsigned readers = 0;
139 trans_for_each_path(trans, linked)
140 if (linked->l[b->c.level].b == b &&
141 btree_node_read_locked(linked, b->c.level))
145 * Must drop our read locks before calling six_lock_write() -
146 * six_unlock() won't do wakeups until the reader count
147 * goes to 0, and it's safe because we have the node intent locked:
150 if (!b->c.lock.readers)
151 atomic64_sub(__SIX_VAL(read_lock, readers),
152 &b->c.lock.state.counter);
154 this_cpu_sub(*b->c.lock.readers, readers);
156 six_lock_write(&b->c.lock, NULL, NULL);
158 if (!b->c.lock.readers)
159 atomic64_add(__SIX_VAL(read_lock, readers),
160 &b->c.lock.state.counter);
162 this_cpu_add(*b->c.lock.readers, readers);
165 bool __bch2_btree_node_relock(struct btree_trans *trans,
166 struct btree_path *path, unsigned level)
168 struct btree *b = btree_path_node(path, level);
169 int want = __btree_lock_want(path, level);
171 if (!is_btree_node(path, level))
177 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
178 (btree_node_lock_seq_matches(path, b, level) &&
179 btree_node_lock_increment(trans, b, level, want))) {
180 mark_btree_node_locked(path, level, want);
184 trace_btree_node_relock_fail(trans->fn, _RET_IP_,
188 path->l[level].lock_seq,
189 is_btree_node(path, level) ? b->c.lock.state.seq : 0);
193 bool bch2_btree_node_upgrade(struct btree_trans *trans,
194 struct btree_path *path, unsigned level)
196 struct btree *b = path->l[level].b;
198 if (!is_btree_node(path, level))
201 switch (btree_lock_want(path, level)) {
202 case BTREE_NODE_UNLOCKED:
203 BUG_ON(btree_node_locked(path, level));
205 case BTREE_NODE_READ_LOCKED:
206 BUG_ON(btree_node_intent_locked(path, level));
207 return bch2_btree_node_relock(trans, path, level);
208 case BTREE_NODE_INTENT_LOCKED:
212 if (btree_node_intent_locked(path, level))
218 if (btree_node_locked(path, level)
219 ? six_lock_tryupgrade(&b->c.lock)
220 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
223 if (btree_node_lock_seq_matches(path, b, level) &&
224 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
225 btree_node_unlock(path, level);
231 mark_btree_node_intent_locked(path, level);
235 static inline bool btree_path_get_locks(struct btree_trans *trans,
236 struct btree_path *path,
239 unsigned l = path->level;
243 if (!btree_path_node(path, l))
247 ? bch2_btree_node_upgrade(trans, path, l)
248 : bch2_btree_node_relock(trans, path, l)))
252 } while (l < path->locks_want);
255 * When we fail to get a lock, we have to ensure that any child nodes
256 * can't be relocked so bch2_btree_path_traverse has to walk back up to
257 * the node that we failed to relock:
260 __bch2_btree_path_unlock(path);
261 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
264 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
266 } while (fail_idx >= 0);
269 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
270 path->uptodate = BTREE_ITER_UPTODATE;
272 bch2_trans_verify_locks(trans);
274 return path->uptodate < BTREE_ITER_NEED_RELOCK;
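/* Position of a btree node or cached key, used for lock ordering checks: */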
277 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
281 ? container_of(_b, struct btree, c)->key.k.p
282 : container_of(_b, struct bkey_cached, c)->key.pos;
286 bool __bch2_btree_node_lock(struct btree_trans *trans,
287 struct btree_path *path,
289 struct bpos pos, unsigned level,
290 enum six_lock_type type,
291 six_lock_should_sleep_fn should_sleep_fn, void *p,
294 struct btree_path *linked;
297 /* Check if it's safe to block: */
298 trans_for_each_path(trans, linked) {
299 if (!linked->nodes_locked)
303 * Can't block taking an intent lock if we have _any_ nodes read locked:
306 * - Our read lock blocks another thread with an intent lock on
307 * the same node from getting a write lock, and thus from
308 * dropping its intent lock
310 * - And the other thread may have multiple nodes intent locked:
311 * both the node we want to intent lock, and the node we
312 * already have read locked - deadlock:
314 if (type == SIX_LOCK_intent &&
315 linked->nodes_locked != linked->nodes_intent_locked) {
320 if (linked->btree_id != path->btree_id) {
321 if (linked->btree_id < path->btree_id)
329 * Within the same btree, non-cached paths come before cached paths:
332 if (linked->cached != path->cached) {
341 * Interior nodes must be locked before their descendants: if
342 * another path might have descendants of the node we're about to
343 * lock already locked, it must have the ancestors locked too:
345 if (level > __fls(linked->nodes_locked)) {
350 /* Must lock btree nodes in key order: */
351 if (btree_node_locked(linked, level) &&
352 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
353 linked->cached)) <= 0) {
359 return btree_node_lock_type(trans, path, b, pos, level,
360 type, should_sleep_fn, p);
362 trace_trans_restart_would_deadlock(trans->fn, ip,
363 trans->in_traverse_all, reason,
370 btree_trans_restart(trans);
374 /* Btree iterator locking: */
376 #ifdef CONFIG_BCACHEFS_DEBUG
378 static void bch2_btree_path_verify_locks(struct btree_path *path)
382 if (!path->nodes_locked) {
383 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
384 btree_path_node(path, path->level));
388 for (l = 0; btree_path_node(path, l); l++)
389 BUG_ON(btree_lock_want(path, l) !=
390 btree_node_locked_type(path, l));
393 void bch2_trans_verify_locks(struct btree_trans *trans)
395 struct btree_path *path;
397 trans_for_each_path(trans, path)
398 bch2_btree_path_verify_locks(path);
401 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
404 /* Btree path locking: */
407 * Only for btree_cache.c - only relocks intent locks
409 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
410 struct btree_path *path)
414 for (l = path->level;
415 l < path->locks_want && btree_path_node(path, l);
417 if (!bch2_btree_node_relock(trans, path, l)) {
418 __bch2_btree_path_unlock(path);
419 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
420 trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
421 path->btree_id, &path->pos);
422 btree_trans_restart(trans);
431 static bool __bch2_btree_path_relock(struct btree_trans *trans,
432 struct btree_path *path, unsigned long trace_ip)
434 bool ret = btree_path_get_locks(trans, path, false);
437 trace_trans_restart_relock_path(trans->fn, trace_ip,
438 path->btree_id, &path->pos);
439 btree_trans_restart(trans);
444 static inline bool bch2_btree_path_relock(struct btree_trans *trans,
445 struct btree_path *path, unsigned long trace_ip)
447 return btree_node_locked(path, path->level)
449 : __bch2_btree_path_relock(trans, path, trace_ip);
452 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
453 struct btree_path *path,
454 unsigned new_locks_want)
456 struct btree_path *linked;
458 EBUG_ON(path->locks_want >= new_locks_want);
460 path->locks_want = new_locks_want;
462 if (btree_path_get_locks(trans, path, true))
466 * XXX: this is ugly - we'd prefer to not be mucking with other
467 * iterators in the btree_trans here.
469 * On failure to upgrade the iterator, setting iter->locks_want and
470 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
471 * get the locks we want on transaction restart.
473 * But if this iterator was a clone, on transaction restart what we did
474 * to this iterator isn't going to be preserved.
476 * Possibly we could add an iterator field for the parent iterator when
477 * an iterator is a copy - for now, we'll just upgrade any other
478 * iterators with the same btree id.
480 * The code below used to be needed to ensure ancestor nodes get locked
481 * before interior nodes - now that's handled by
482 * bch2_btree_path_traverse_all().
484 if (!path->cached && !trans->in_traverse_all)
485 trans_for_each_path(trans, linked)
486 if (linked != path &&
487 linked->cached == path->cached &&
488 linked->btree_id == path->btree_id &&
489 linked->locks_want < new_locks_want) {
490 linked->locks_want = new_locks_want;
491 btree_path_get_locks(trans, linked, true);
497 void __bch2_btree_path_downgrade(struct btree_path *path,
498 unsigned new_locks_want)
502 EBUG_ON(path->locks_want < new_locks_want);
504 path->locks_want = new_locks_want;
506 while (path->nodes_locked &&
507 (l = __fls(path->nodes_locked)) >= path->locks_want) {
508 if (l > path->level) {
509 btree_node_unlock(path, l);
511 if (btree_node_intent_locked(path, l)) {
512 six_lock_downgrade(&path->l[l].b->c.lock);
513 path->nodes_intent_locked ^= 1 << l;
519 bch2_btree_path_verify_locks(path);
522 void bch2_trans_downgrade(struct btree_trans *trans)
524 struct btree_path *path;
526 trans_for_each_path(trans, path)
527 bch2_btree_path_downgrade(path);
530 /* Btree transaction locking: */
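/*
 * Relock every path marked should_be_locked; on failure the transaction is
 * restarted and false is returned.
 */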
532 bool bch2_trans_relock(struct btree_trans *trans)
534 struct btree_path *path;
536 if (unlikely(trans->restarted))
539 trans_for_each_path(trans, path)
540 if (path->should_be_locked &&
541 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
542 trace_trans_restart_relock(trans->fn, _RET_IP_,
543 path->btree_id, &path->pos);
544 BUG_ON(!trans->restarted);
550 void bch2_trans_unlock(struct btree_trans *trans)
552 struct btree_path *path;
554 trans_for_each_path(trans, path)
555 __bch2_btree_path_unlock(path);
558 /* Btree iterator: */
560 #ifdef CONFIG_BCACHEFS_DEBUG
562 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
563 struct btree_path *path)
565 struct bkey_cached *ck;
566 bool locked = btree_node_locked(path, 0);
568 if (!bch2_btree_node_relock(trans, path, 0))
571 ck = (void *) path->l[0].b;
572 BUG_ON(ck->key.btree_id != path->btree_id ||
573 bkey_cmp(ck->key.pos, path->pos));
576 btree_node_unlock(path, 0);
579 static void bch2_btree_path_verify_level(struct btree_trans *trans,
580 struct btree_path *path, unsigned level)
582 struct btree_path_level *l;
583 struct btree_node_iter tmp;
585 struct bkey_packed *p, *k;
586 struct printbuf buf1 = PRINTBUF;
587 struct printbuf buf2 = PRINTBUF;
588 struct printbuf buf3 = PRINTBUF;
591 if (!bch2_debug_check_iterators)
596 locked = btree_node_locked(path, level);
600 bch2_btree_path_verify_cached(trans, path);
604 if (!btree_path_node(path, level))
607 if (!bch2_btree_node_relock(trans, path, level))
610 BUG_ON(!btree_path_pos_in_node(path, l->b));
612 bch2_btree_node_iter_verify(&l->iter, l->b);
615 * For interior nodes, the iterator will have skipped past deleted keys:
618 ? bch2_btree_node_iter_prev(&tmp, l->b)
619 : bch2_btree_node_iter_prev_all(&tmp, l->b);
620 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
622 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
627 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
633 btree_node_unlock(path, level);
636 bch2_bpos_to_text(&buf1, path->pos);
639 struct bkey uk = bkey_unpack_key(l->b, p);
640 bch2_bkey_to_text(&buf2, &uk);
642 pr_buf(&buf2, "(none)");
646 struct bkey uk = bkey_unpack_key(l->b, k);
647 bch2_bkey_to_text(&buf3, &uk);
649 pr_buf(&buf3, "(none)");
652 panic("path should be %s key at level %u:\n"
656 msg, level, buf1.buf, buf2.buf, buf3.buf);
659 static void bch2_btree_path_verify(struct btree_trans *trans,
660 struct btree_path *path)
662 struct bch_fs *c = trans->c;
665 EBUG_ON(path->btree_id >= BTREE_ID_NR);
667 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
669 BUG_ON(!path->cached &&
670 c->btree_roots[path->btree_id].b->c.level > i);
674 bch2_btree_path_verify_level(trans, path, i);
677 bch2_btree_path_verify_locks(path);
680 void bch2_trans_verify_paths(struct btree_trans *trans)
682 struct btree_path *path;
684 trans_for_each_path(trans, path)
685 bch2_btree_path_verify(trans, path);
688 static void bch2_btree_iter_verify(struct btree_iter *iter)
690 struct btree_trans *trans = iter->trans;
692 BUG_ON(iter->btree_id >= BTREE_ID_NR);
694 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
696 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
697 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
699 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
700 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
701 !btree_type_has_snapshots(iter->btree_id));
703 if (iter->update_path)
704 bch2_btree_path_verify(trans, iter->update_path);
705 bch2_btree_path_verify(trans, iter->path);
708 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
710 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
711 !iter->pos.snapshot);
713 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
714 iter->pos.snapshot != iter->snapshot);
716 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
717 bkey_cmp(iter->pos, iter->k.p) > 0);
720 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
722 struct btree_trans *trans = iter->trans;
723 struct btree_iter copy;
724 struct bkey_s_c prev;
727 if (!bch2_debug_check_iterators)
730 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
733 if (bkey_err(k) || !k.k)
736 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
740 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
741 BTREE_ITER_NOPRESERVE|
742 BTREE_ITER_ALL_SNAPSHOTS);
743 prev = bch2_btree_iter_prev(&copy);
747 ret = bkey_err(prev);
751 if (!bkey_cmp(prev.k->p, k.k->p) &&
752 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
753 prev.k->p.snapshot) > 0) {
754 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
756 bch2_bkey_to_text(&buf1, k.k);
757 bch2_bkey_to_text(&buf2, prev.k);
759 panic("iter snap %u\n"
766 bch2_trans_iter_exit(trans, &copy);
770 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
771 struct bpos pos, bool key_cache)
773 struct btree_path *path;
775 struct printbuf buf = PRINTBUF;
777 trans_for_each_path_inorder(trans, path, idx) {
778 int cmp = cmp_int(path->btree_id, id) ?:
779 cmp_int(path->cached, key_cache);
786 if (!(path->nodes_locked & 1) ||
787 !path->should_be_locked)
791 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
792 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
795 if (!bkey_cmp(pos, path->pos))
800 bch2_dump_trans_paths_updates(trans);
801 bch2_bpos_to_text(&buf, pos);
803 panic("not locked: %s %s%s\n",
804 bch2_btree_ids[id], buf.buf,
805 key_cache ? " cached" : "");
810 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
811 struct btree_path *path, unsigned l) {}
812 static inline void bch2_btree_path_verify(struct btree_trans *trans,
813 struct btree_path *path) {}
814 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
815 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
816 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
820 /* Btree path: fixups after btree updates */
822 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
825 struct bkey_packed *k)
827 struct btree_node_iter_set *set;
829 btree_node_iter_for_each(iter, set)
830 if (set->end == t->end_offset) {
831 set->k = __btree_node_key_to_offset(b, k);
832 bch2_btree_node_iter_sort(iter, b);
836 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
839 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
841 struct bkey_packed *where)
843 struct btree_path_level *l = &path->l[b->c.level];
845 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
848 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
849 bch2_btree_node_iter_advance(&l->iter, l->b);
852 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
854 struct bkey_packed *where)
856 struct btree_path *path;
858 trans_for_each_path_with_node(trans, b, path) {
859 __bch2_btree_path_fix_key_modified(path, b, where);
860 bch2_btree_path_verify_level(trans, path, b->c.level);
864 static void __bch2_btree_node_iter_fix(struct btree_path *path,
866 struct btree_node_iter *node_iter,
868 struct bkey_packed *where,
869 unsigned clobber_u64s,
872 const struct bkey_packed *end = btree_bkey_last(b, t);
873 struct btree_node_iter_set *set;
874 unsigned offset = __btree_node_key_to_offset(b, where);
875 int shift = new_u64s - clobber_u64s;
876 unsigned old_end = t->end_offset - shift;
877 unsigned orig_iter_pos = node_iter->data[0].k;
878 bool iter_current_key_modified =
879 orig_iter_pos >= offset &&
880 orig_iter_pos <= offset + clobber_u64s;
882 btree_node_iter_for_each(node_iter, set)
883 if (set->end == old_end)
886 /* didn't find the bset in the iterator - might have to re-add it: */
888 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
889 bch2_btree_node_iter_push(node_iter, b, where, end);
892 /* Iterator is after key that changed */
896 set->end = t->end_offset;
898 /* Iterator hasn't gotten to the key that changed yet: */
903 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
905 } else if (set->k < offset + clobber_u64s) {
906 set->k = offset + new_u64s;
907 if (set->k == set->end)
908 bch2_btree_node_iter_set_drop(node_iter, set);
910 /* Iterator is after key that changed */
911 set->k = (int) set->k + shift;
915 bch2_btree_node_iter_sort(node_iter, b);
917 if (node_iter->data[0].k != orig_iter_pos)
918 iter_current_key_modified = true;
921 * When a new key is added, and the node iterator now points to that
922 * key, the iterator might have skipped past deleted keys that should
923 * come after the key the iterator now points to. We have to rewind to
924 * before those deleted keys - otherwise
925 * bch2_btree_node_iter_prev_all() breaks:
927 if (!bch2_btree_node_iter_end(node_iter) &&
928 iter_current_key_modified &&
931 struct bkey_packed *k, *k2, *p;
933 k = bch2_btree_node_iter_peek_all(node_iter, b);
935 for_each_bset(b, t) {
936 bool set_pos = false;
938 if (node_iter->data[0].end == t->end_offset)
941 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
943 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
944 bkey_iter_cmp(b, k, p) < 0) {
950 btree_node_iter_set_set_pos(node_iter,
956 void bch2_btree_node_iter_fix(struct btree_trans *trans,
957 struct btree_path *path,
959 struct btree_node_iter *node_iter,
960 struct bkey_packed *where,
961 unsigned clobber_u64s,
964 struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
965 struct btree_path *linked;
967 if (node_iter != &path->l[b->c.level].iter) {
968 __bch2_btree_node_iter_fix(path, b, node_iter, t,
969 where, clobber_u64s, new_u64s);
971 if (bch2_debug_check_iterators)
972 bch2_btree_node_iter_verify(node_iter, b);
975 trans_for_each_path_with_node(trans, b, linked) {
976 __bch2_btree_node_iter_fix(linked, b,
977 &linked->l[b->c.level].iter, t,
978 where, clobber_u64s, new_u64s);
979 bch2_btree_path_verify_level(trans, linked, b->c.level);
983 /* Btree path level: pointer to a particular btree node and node iter */
985 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
986 struct btree_path_level *l,
988 struct bkey_packed *k)
992 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole:
995 u->type = KEY_TYPE_deleted;
996 return bkey_s_c_null;
999 return bkey_disassemble(l->b, k, u);
1002 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1003 struct btree_path_level *l,
1006 return __btree_iter_unpack(c, l, u,
1007 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1010 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
1011 struct btree_path *path,
1012 struct btree_path_level *l,
1015 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
1016 bch2_btree_node_iter_peek(&l->iter, l->b));
1018 path->pos = k.k ? k.k->p : l->b->key.k.p;
1019 trans->paths_sorted = false;
1023 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
1024 struct btree_path *path,
1025 struct btree_path_level *l,
1028 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
1029 bch2_btree_node_iter_prev(&l->iter, l->b));
1031 path->pos = k.k ? k.k->p : l->b->data->min_key;
1032 trans->paths_sorted = false;
1036 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1037 struct btree_path_level *l,
1040 struct bkey_packed *k;
1041 int nr_advanced = 0;
1043 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1044 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1045 if (max_advance > 0 && nr_advanced >= max_advance)
1048 bch2_btree_node_iter_advance(&l->iter, l->b);
1056 * Verify that iterator for parent node points to child node:
1058 static void btree_path_verify_new_node(struct btree_trans *trans,
1059 struct btree_path *path, struct btree *b)
1061 struct bch_fs *c = trans->c;
1062 struct btree_path_level *l;
1065 struct bkey_packed *k;
1067 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1070 if (trans->journal_replay_not_finished)
1073 plevel = b->c.level + 1;
1074 if (!btree_path_node(path, plevel))
1077 parent_locked = btree_node_locked(path, plevel);
1079 if (!bch2_btree_node_relock(trans, path, plevel))
1082 l = &path->l[plevel];
1083 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1086 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1087 struct printbuf buf1 = PRINTBUF;
1088 struct printbuf buf2 = PRINTBUF;
1089 struct printbuf buf3 = PRINTBUF;
1090 struct printbuf buf4 = PRINTBUF;
1091 struct bkey uk = bkey_unpack_key(b, k);
1093 bch2_dump_btree_node(c, l->b);
1094 bch2_bpos_to_text(&buf1, path->pos);
1095 bch2_bkey_to_text(&buf2, &uk);
1096 bch2_bpos_to_text(&buf3, b->data->min_key);
1097 bch2_bpos_to_text(&buf4, b->data->max_key);
1098 panic("parent iter doesn't point to new node:\n"
1102 bch2_btree_ids[path->btree_id],
1103 buf1.buf, buf2.buf, buf3.buf, buf4.buf);
1107 btree_node_unlock(path, plevel);
1110 static inline void __btree_path_level_init(struct btree_path *path,
1113 struct btree_path_level *l = &path->l[level];
1115 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1118 * Iterators to interior nodes should always be pointed at the first non-whiteout:
1122 bch2_btree_node_iter_peek(&l->iter, l->b);
1125 static inline void btree_path_level_init(struct btree_trans *trans,
1126 struct btree_path *path,
1129 BUG_ON(path->cached);
1131 btree_path_verify_new_node(trans, path, b);
1133 EBUG_ON(!btree_path_pos_in_node(path, b));
1134 EBUG_ON(b->c.lock.state.seq & 1);
1136 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1137 path->l[b->c.level].b = b;
1138 __btree_path_level_init(path, b->c.level);
1141 /* Btree path: fixups after btree node updates: */
1144 * A btree node is being replaced - update the iterator to point to the new node:
1147 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1149 struct btree_path *path;
1151 trans_for_each_path(trans, path)
1152 if (!path->cached &&
1153 btree_path_pos_in_node(path, b)) {
1154 enum btree_node_locked_type t =
1155 btree_lock_want(path, b->c.level);
1157 if (path->nodes_locked &&
1158 t != BTREE_NODE_UNLOCKED) {
1159 btree_node_unlock(path, b->c.level);
1160 six_lock_increment(&b->c.lock, (enum six_lock_type) t);
1161 mark_btree_node_locked(path, b->c.level, (enum six_lock_type) t);
1164 btree_path_level_init(trans, path, b);
1169 * A btree node has been modified in such a way as to invalidate iterators - fix them:
1172 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1174 struct btree_path *path;
1176 trans_for_each_path_with_node(trans, b, path)
1177 __btree_path_level_init(path, b->c.level);
1180 /* Btree path: traverse, set_pos: */
1182 static int lock_root_check_fn(struct six_lock *lock, void *p)
1184 struct btree *b = container_of(lock, struct btree, c.lock);
1185 struct btree **rootp = p;
1187 return b == *rootp ? 0 : -1;
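/*
 * Lock the root of the btree this path points to, re-reading the root pointer
 * and retrying if the root was replaced while we waited for the lock
 * (detected via lock_root_check_fn()).
 */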
1190 static inline int btree_path_lock_root(struct btree_trans *trans,
1191 struct btree_path *path,
1192 unsigned depth_want,
1193 unsigned long trace_ip)
1195 struct bch_fs *c = trans->c;
1196 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1197 enum six_lock_type lock_type;
1200 EBUG_ON(path->nodes_locked);
1203 b = READ_ONCE(*rootp);
1204 path->level = READ_ONCE(b->c.level);
1206 if (unlikely(path->level < depth_want)) {
1208 * the root is at a lower depth than the depth we want:
1209 * got to the end of the btree, or we're walking nodes
1210 * greater than some depth and there are no nodes >= that depth
1213 path->level = depth_want;
1214 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1215 path->l[i].b = NULL;
1219 lock_type = __btree_lock_want(path, path->level);
1220 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1221 path->level, lock_type,
1222 lock_root_check_fn, rootp,
1224 if (trans->restarted)
1229 if (likely(b == READ_ONCE(*rootp) &&
1230 b->c.level == path->level &&
1232 for (i = 0; i < path->level; i++)
1233 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1234 path->l[path->level].b = b;
1235 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1236 path->l[i].b = NULL;
1238 mark_btree_node_locked(path, path->level, lock_type);
1239 btree_path_level_init(trans, path, b);
1243 six_unlock_type(&b->c.lock, lock_type);
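/*
 * Prefetch the next few child nodes pointed to by the node at path->level, so
 * walking keys in order doesn't stall on synchronous reads; prefetching is
 * more aggressive before the filesystem has finished starting.
 */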
1248 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1250 struct bch_fs *c = trans->c;
1251 struct btree_path_level *l = path_l(path);
1252 struct btree_node_iter node_iter = l->iter;
1253 struct bkey_packed *k;
1254 struct bkey_buf tmp;
1255 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1256 ? (path->level > 1 ? 0 : 2)
1257 : (path->level > 1 ? 1 : 16);
1258 bool was_locked = btree_node_locked(path, path->level);
1261 bch2_bkey_buf_init(&tmp);
1263 while (nr && !ret) {
1264 if (!bch2_btree_node_relock(trans, path, path->level))
1267 bch2_btree_node_iter_advance(&node_iter, l->b);
1268 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1272 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1273 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1278 btree_node_unlock(path, path->level);
1280 bch2_bkey_buf_exit(&tmp, c);
1284 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
1285 struct btree_and_journal_iter *jiter)
1287 struct bch_fs *c = trans->c;
1289 struct bkey_buf tmp;
1290 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1291 ? (path->level > 1 ? 0 : 2)
1292 : (path->level > 1 ? 1 : 16);
1293 bool was_locked = btree_node_locked(path, path->level);
1296 bch2_bkey_buf_init(&tmp);
1298 while (nr && !ret) {
1299 if (!bch2_btree_node_relock(trans, path, path->level))
1302 bch2_btree_and_journal_iter_advance(jiter);
1303 k = bch2_btree_and_journal_iter_peek(jiter);
1307 bch2_bkey_buf_reassemble(&tmp, c, k);
1308 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1313 btree_node_unlock(path, path->level);
1315 bch2_bkey_buf_exit(&tmp, c);
1319 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1320 struct btree_path *path,
1321 unsigned plevel, struct btree *b)
1323 struct btree_path_level *l = &path->l[plevel];
1324 bool locked = btree_node_locked(path, plevel);
1325 struct bkey_packed *k;
1326 struct bch_btree_ptr_v2 *bp;
1328 if (!bch2_btree_node_relock(trans, path, plevel))
1331 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1332 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1334 bp = (void *) bkeyp_val(&l->b->format, k);
1335 bp->mem_ptr = (unsigned long)b;
1338 btree_node_unlock(path, plevel);
1341 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
1342 struct btree_path *path,
1344 struct bkey_buf *out)
1346 struct bch_fs *c = trans->c;
1347 struct btree_path_level *l = path_l(path);
1348 struct btree_and_journal_iter jiter;
1352 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
1354 k = bch2_btree_and_journal_iter_peek(&jiter);
1356 bch2_bkey_buf_reassemble(out, c, k);
1358 if (flags & BTREE_ITER_PREFETCH)
1359 ret = btree_path_prefetch_j(trans, path, &jiter);
1361 bch2_btree_and_journal_iter_exit(&jiter);
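/*
 * Descend one level: unpack the child pointer at the parent's node iterator
 * position (consulting journal keys if replay hasn't finished), lock the
 * child node and initialize path->l[] for the new level.
 */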
1365 static __always_inline int btree_path_down(struct btree_trans *trans,
1366 struct btree_path *path,
1368 unsigned long trace_ip)
1370 struct bch_fs *c = trans->c;
1371 struct btree_path_level *l = path_l(path);
1373 unsigned level = path->level - 1;
1374 enum six_lock_type lock_type = __btree_lock_want(path, level);
1375 struct bkey_buf tmp;
1378 EBUG_ON(!btree_node_locked(path, path->level));
1380 bch2_bkey_buf_init(&tmp);
1382 if (unlikely(trans->journal_replay_not_finished)) {
1383 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
1387 bch2_bkey_buf_unpack(&tmp, c, l->b,
1388 bch2_btree_node_iter_peek(&l->iter, l->b));
1390 if (flags & BTREE_ITER_PREFETCH) {
1391 ret = btree_path_prefetch(trans, path);
1397 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1398 ret = PTR_ERR_OR_ZERO(b);
1402 mark_btree_node_locked(path, level, lock_type);
1403 btree_path_level_init(trans, path, b);
1405 if (likely(!trans->journal_replay_not_finished &&
1406 tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
1407 unlikely(b != btree_node_mem_ptr(tmp.k)))
1408 btree_node_mem_ptr_set(trans, path, level + 1, b);
1410 if (btree_node_read_locked(path, level + 1))
1411 btree_node_unlock(path, level + 1);
1412 path->level = level;
1414 bch2_btree_path_verify_locks(path);
1416 bch2_bkey_buf_exit(&tmp, c);
1420 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1421 unsigned, unsigned long);
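/*
 * Retraverse every path in the transaction, in sorted order, after a
 * transaction restart; locks_want is first propagated between paths on the
 * same btree so lock upgrades happen in a consistent order.
 */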
1423 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1425 struct bch_fs *c = trans->c;
1426 struct btree_path *path, *prev;
1427 unsigned long trace_ip = _RET_IP_;
1430 if (trans->in_traverse_all)
1433 trans->in_traverse_all = true;
1436 trans->restarted = false;
1438 trans_for_each_path(trans, path)
1439 path->should_be_locked = false;
1441 btree_trans_sort_paths(trans);
1443 trans_for_each_path_inorder_reverse(trans, path, i) {
1445 if (path->btree_id == prev->btree_id &&
1446 path->locks_want < prev->locks_want)
1447 __bch2_btree_path_upgrade(trans, path, prev->locks_want);
1448 else if (!path->locks_want && prev->locks_want)
1449 __bch2_btree_path_upgrade(trans, path, 1);
1455 bch2_trans_unlock(trans);
1458 if (unlikely(trans->memory_allocation_failure)) {
1461 closure_init_stack(&cl);
1464 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1469 /* Now, redo traversals in correct order: */
1471 while (i < trans->nr_sorted) {
1472 path = trans->paths + trans->sorted[i];
1475 * Traversing a path can cause another path to be added at about
1476 * the same position:
1478 if (path->uptodate) {
1479 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1480 if (ret == -EINTR || ret == -ENOMEM)
1490 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1491 * and relock(), relock() won't relock since path->should_be_locked
1492 * isn't set yet, which is all fine
1494 trans_for_each_path(trans, path)
1495 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1497 bch2_btree_cache_cannibalize_unlock(c);
1499 trans->in_traverse_all = false;
1501 trace_trans_traverse_all(trans->fn, trace_ip);
1505 static inline bool btree_path_good_node(struct btree_trans *trans,
1506 struct btree_path *path,
1507 unsigned l, int check_pos)
1509 if (!is_btree_node(path, l) ||
1510 !bch2_btree_node_relock(trans, path, l))
1513 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1515 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1520 static void btree_path_set_level_up(struct btree_path *path)
1522 btree_node_unlock(path, path->level);
1523 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
1525 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1528 static void btree_path_set_level_down(struct btree_trans *trans,
1529 struct btree_path *path,
1534 path->level = new_level;
1536 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1537 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1538 btree_node_unlock(path, l);
1540 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1541 bch2_btree_path_verify(trans, path);
1544 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1545 struct btree_path *path,
1548 unsigned i, l = path->level;
1550 while (btree_path_node(path, l) &&
1551 !btree_path_good_node(trans, path, l, check_pos)) {
1552 btree_node_unlock(path, l);
1553 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1557 /* If we need intent locks, take them too: */
1559 i < path->locks_want && btree_path_node(path, i);
1561 if (!bch2_btree_node_relock(trans, path, i))
1563 btree_node_unlock(path, l);
1564 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1572 * This is the main state machine for walking down the btree - walks down to a
1575 * Returns 0 on success, -EIO on error (error reading in a btree node).
1577 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1578 * stashed in the iterator and returned from bch2_trans_exit().
1580 static int btree_path_traverse_one(struct btree_trans *trans,
1581 struct btree_path *path,
1583 unsigned long trace_ip)
1585 unsigned depth_want = path->level;
1588 if (unlikely(trans->restarted)) {
1594 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1595 * and re-traverse the path without a transaction restart:
1597 if (path->should_be_locked) {
1598 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1603 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1607 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1610 path->level = btree_path_up_until_good_node(trans, path, 0);
1613 * Note: path->nodes[path->level] may be temporarily NULL here - that
1614 * would indicate to other code that we got to the end of the btree,
1615 * here it indicates that relocking the root failed - it's critical that
1616 * btree_path_lock_root() comes next and that it can't fail
1618 while (path->level > depth_want) {
1619 ret = btree_path_node(path, path->level)
1620 ? btree_path_down(trans, path, flags, trace_ip)
1621 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1622 if (unlikely(ret)) {
1625 * No nodes at this level - got to the end of the btree:
1632 __bch2_btree_path_unlock(path);
1633 path->level = depth_want;
1636 path->l[path->level].b =
1637 BTREE_ITER_NO_NODE_ERROR;
1639 path->l[path->level].b =
1640 BTREE_ITER_NO_NODE_DOWN;
1645 path->uptodate = BTREE_ITER_UPTODATE;
1647 BUG_ON((ret == -EINTR) != !!trans->restarted);
1648 bch2_btree_path_verify(trans, path);
1652 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1653 struct btree_path *path, unsigned flags)
1655 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1658 return bch2_trans_cond_resched(trans) ?:
1659 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1662 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1663 struct btree_path *src)
1665 unsigned i, offset = offsetof(struct btree_path, pos);
1667 memcpy((void *) dst + offset,
1668 (void *) src + offset,
1669 sizeof(struct btree_path) - offset);
1671 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1672 if (btree_node_locked(dst, i))
1673 six_lock_increment(&dst->l[i].b->c.lock,
1674 __btree_lock_want(dst, i));
1676 trans->paths_sorted = false;
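/*
 * Paths are refcounted: before modifying a path that may be shared (or that
 * the caller wants preserved), clone it and operate on the copy; the clone
 * takes an extra reference on every node lock the source holds.
 */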
1679 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1682 struct btree_path *new = btree_path_alloc(trans, src);
1684 btree_path_copy(trans, new, src);
1685 __btree_path_get(new, intent);
1689 struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
1690 struct btree_path *path, bool intent)
1692 __btree_path_put(path, intent);
1693 path = btree_path_clone(trans, path, intent);
1694 path->preserve = false;
1695 #ifdef CONFIG_BCACHEFS_DEBUG
1696 path->ip_allocated = _RET_IP_;
1698 path->should_be_locked = false;
1702 struct btree_path * __must_check
1703 __bch2_btree_path_set_pos(struct btree_trans *trans,
1704 struct btree_path *path, struct bpos new_pos,
1705 bool intent, int cmp)
1707 unsigned l = path->level;
1709 EBUG_ON(trans->restarted);
1710 EBUG_ON(!path->ref);
1712 path = bch2_btree_path_make_mut(trans, path, intent);
1714 path->pos = new_pos;
1715 trans->paths_sorted = false;
1717 if (unlikely(path->cached)) {
1718 btree_node_unlock(path, 0);
1719 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1720 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1724 l = btree_path_up_until_good_node(trans, path, cmp);
1726 if (btree_path_node(path, l)) {
1727 BUG_ON(!btree_node_locked(path, l));
1729 * We might have to skip over many keys, or just a few: try
1730 * advancing the node iterator, and if we have to skip over too
1731 * many keys just reinit it (or if we're rewinding, since that is expensive):
1735 !btree_path_advance_to_pos(path, &path->l[l], 8))
1736 __btree_path_level_init(path, l);
1739 if (l != path->level) {
1740 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1741 __bch2_btree_path_unlock(path);
1744 bch2_btree_path_verify(trans, path);
1748 /* Btree path: main interface: */
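/*
 * have_path_at_pos()/have_node_at_pos(): look for an adjacent path (in sorted
 * order) at the same position or on the same btree node, so a path being
 * freed can hand off its preserve/should_be_locked state.
 */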
1750 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1752 struct btree_path *next;
1754 next = prev_btree_path(trans, path);
1755 if (next && !btree_path_cmp(next, path))
1758 next = next_btree_path(trans, path);
1759 if (next && !btree_path_cmp(next, path))
1765 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1767 struct btree_path *next;
1769 next = prev_btree_path(trans, path);
1770 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1773 next = next_btree_path(trans, path);
1774 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1780 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1782 __bch2_btree_path_unlock(path);
1783 btree_path_list_remove(trans, path);
1784 trans->paths_allocated &= ~(1ULL << path->idx);
1787 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1789 struct btree_path *dup;
1791 EBUG_ON(trans->paths + path->idx != path);
1792 EBUG_ON(!path->ref);
1794 if (!__btree_path_put(path, intent))
1798 * Perhaps instead we should check for duplicate paths in traverse_all:
1800 if (path->preserve &&
1801 (dup = have_path_at_pos(trans, path))) {
1802 dup->preserve = true;
1803 path->preserve = false;
1807 if (!path->preserve &&
1808 (dup = have_node_at_pos(trans, path)))
1812 if (path->should_be_locked &&
1813 !btree_node_locked(dup, path->level))
1816 dup->should_be_locked |= path->should_be_locked;
1817 __bch2_path_free(trans, path);
1820 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1822 struct btree_insert_entry *i;
1824 pr_buf(buf, "transaction updates for %s journal seq %llu",
1825 trans->fn, trans->journal_res.seq);
1827 pr_indent_push(buf, 2);
1829 trans_for_each_update(trans, i) {
1830 struct bkey_s_c old = { &i->old_k, i->old_v };
1832 pr_buf(buf, "update: btree=%s cached=%u %pS",
1833 bch2_btree_ids[i->btree_id],
1835 (void *) i->ip_allocated);
1838 pr_buf(buf, " old ");
1839 bch2_bkey_val_to_text(buf, trans->c, old);
1842 pr_buf(buf, " new ");
1843 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1847 pr_indent_pop(buf, 2);
1851 void bch2_dump_trans_updates(struct btree_trans *trans)
1853 struct printbuf buf = PRINTBUF;
1855 bch2_trans_updates_to_text(&buf, trans);
1856 bch_err(trans->c, "%s", buf.buf);
1857 printbuf_exit(&buf);
1861 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1863 struct btree_path *path;
1864 struct printbuf buf = PRINTBUF;
1867 btree_trans_sort_paths(trans);
1869 trans_for_each_path_inorder(trans, path, idx) {
1870 printbuf_reset(&buf);
1872 bch2_bpos_to_text(&buf, path->pos);
1874 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
1875 path->idx, path->ref, path->intent_ref,
1876 path->should_be_locked ? " S" : "",
1877 path->preserve ? " P" : "",
1878 bch2_btree_ids[path->btree_id],
1882 #ifdef CONFIG_BCACHEFS_DEBUG
1883 (void *) path->ip_allocated
1890 printbuf_exit(&buf);
1892 bch2_dump_trans_updates(trans);
1895 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1896 struct btree_path *pos)
1898 struct btree_path *path;
1901 if (unlikely(trans->paths_allocated ==
1902 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1903 bch2_dump_trans_paths_updates(trans);
1904 panic("trans path oveflow\n");
1907 idx = __ffs64(~trans->paths_allocated);
1908 trans->paths_allocated |= 1ULL << idx;
1910 path = &trans->paths[idx];
1914 path->intent_ref = 0;
1915 path->nodes_locked = 0;
1916 path->nodes_intent_locked = 0;
1918 btree_path_list_add(trans, pos, path);
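/*
 * Get a path for the given btree and position, reusing an existing path at
 * the same position when possible; the returned path holds at least the
 * requested locks_want.
 */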
1922 struct btree_path *bch2_path_get(struct btree_trans *trans,
1923 enum btree_id btree_id, struct bpos pos,
1924 unsigned locks_want, unsigned level,
1927 struct btree_path *path, *path_pos = NULL;
1928 bool cached = flags & BTREE_ITER_CACHED;
1929 bool intent = flags & BTREE_ITER_INTENT;
1932 BUG_ON(trans->restarted);
1933 bch2_trans_verify_locks(trans);
1935 btree_trans_sort_paths(trans);
1937 btree_trans_sort_paths(trans);
1939 trans_for_each_path_inorder(trans, path, i) {
1940 if (__btree_path_cmp(path,
1951 path_pos->cached == cached &&
1952 path_pos->btree_id == btree_id &&
1953 path_pos->level == level) {
1954 __btree_path_get(path_pos, intent);
1955 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
1957 path = btree_path_alloc(trans, path_pos);
1960 __btree_path_get(path, intent);
1962 path->btree_id = btree_id;
1963 path->cached = cached;
1964 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1965 path->should_be_locked = false;
1966 path->level = level;
1967 path->locks_want = locks_want;
1968 path->nodes_locked = 0;
1969 path->nodes_intent_locked = 0;
1970 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1971 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1972 #ifdef CONFIG_BCACHEFS_DEBUG
1973 path->ip_allocated = _RET_IP_;
1975 trans->paths_sorted = false;
1978 if (!(flags & BTREE_ITER_NOPRESERVE))
1979 path->preserve = true;
1981 if (path->intent_ref)
1982 locks_want = max(locks_want, level + 1);
1985 * If the path has locks_want greater than requested, we don't downgrade
1986 * it here - on transaction restart because btree node split needs to
1987 * upgrade locks, we might be putting/getting the iterator again.
1988 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1989 * a successful transaction commit.
1992 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1993 if (locks_want > path->locks_want) {
1994 path->locks_want = locks_want;
1995 btree_path_get_locks(trans, path, true);
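/*
 * Return the key at the path's current position without advancing: from the
 * btree node for ordinary paths, or from the cached key for key cache paths;
 * returns a null/deleted key when there is nothing at path->pos.
 */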
2001 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
2006 if (!path->cached) {
2007 struct btree_path_level *l = path_l(path);
2008 struct bkey_packed *_k;
2010 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2012 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
2013 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
2015 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
2017 if (!k.k || bpos_cmp(path->pos, k.k->p))
2020 struct bkey_cached *ck = (void *) path->l[0].b;
2023 (path->btree_id != ck->key.btree_id ||
2024 bkey_cmp(path->pos, ck->key.pos)));
2026 /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
2027 if (unlikely(!ck || !ck->valid))
2028 return bkey_s_c_null;
2030 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2033 k = bkey_i_to_s_c(ck->k);
2040 return (struct bkey_s_c) { u, NULL };
2043 /* Btree iterators: */
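/*
 * Illustrative sketch (not code from this file): the typical peek/advance
 * loop over a btree inside a transaction. Transaction restart (-EINTR)
 * handling is omitted for brevity.
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS_MIN, 0);
 *	while ((k = bch2_btree_iter_peek(&iter)).k && !bkey_err(k)) {
 *		... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */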
2046 __bch2_btree_iter_traverse(struct btree_iter *iter)
2048 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2052 bch2_btree_iter_traverse(struct btree_iter *iter)
2056 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2057 btree_iter_search_key(iter),
2058 iter->flags & BTREE_ITER_INTENT);
2060 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2064 iter->path->should_be_locked = true;
2068 /* Iterate across nodes (leaf and interior nodes) */
2070 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2072 struct btree_trans *trans = iter->trans;
2073 struct btree *b = NULL;
2076 EBUG_ON(iter->path->cached);
2077 bch2_btree_iter_verify(iter);
2079 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2083 b = btree_path_node(iter->path, iter->path->level);
2087 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2089 bkey_init(&iter->k);
2090 iter->k.p = iter->pos = b->key.k.p;
2092 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2093 iter->flags & BTREE_ITER_INTENT);
2094 iter->path->should_be_locked = true;
2095 BUG_ON(iter->path->uptodate);
2097 bch2_btree_iter_verify_entry_exit(iter);
2098 bch2_btree_iter_verify(iter);
2106 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2108 struct btree_trans *trans = iter->trans;
2109 struct btree_path *path = iter->path;
2110 struct btree *b = NULL;
2113 BUG_ON(trans->restarted);
2114 EBUG_ON(iter->path->cached);
2115 bch2_btree_iter_verify(iter);
2117 /* already at end? */
2118 if (!btree_path_node(path, path->level))
2122 if (!btree_path_node(path, path->level + 1)) {
2123 btree_path_set_level_up(path);
2127 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2128 __bch2_btree_path_unlock(path);
2129 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2130 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2131 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2132 trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
2133 path->btree_id, &path->pos);
2134 btree_trans_restart(trans);
2139 b = btree_path_node(path, path->level + 1);
2141 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2142 btree_node_unlock(path, path->level);
2143 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2147 * Haven't gotten to the end of the parent node: go back down to
2148 * the next child node
2151 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2152 iter->flags & BTREE_ITER_INTENT);
2154 btree_path_set_level_down(trans, path, iter->min_depth);
2156 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2160 b = path->l[path->level].b;
2163 bkey_init(&iter->k);
2164 iter->k.p = iter->pos = b->key.k.p;
2166 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2167 iter->flags & BTREE_ITER_INTENT);
2168 iter->path->should_be_locked = true;
2169 BUG_ON(iter->path->uptodate);
2171 bch2_btree_iter_verify_entry_exit(iter);
2172 bch2_btree_iter_verify(iter);
2180 /* Iterate across keys (in leaf nodes only) */
2182 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2184 if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
2185 struct bpos pos = iter->k.p;
2186 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2187 ? bpos_cmp(pos, SPOS_MAX)
2188 : bkey_cmp(pos, SPOS_MAX)) != 0;
2190 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2191 pos = bkey_successor(iter, pos);
2192 bch2_btree_iter_set_pos(iter, pos);
2195 if (!btree_path_node(iter->path, iter->path->level))
2198 iter->advanced = true;
2203 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2205 struct bpos pos = bkey_start_pos(&iter->k);
2206 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2207 ? bpos_cmp(pos, POS_MIN)
2208 : bkey_cmp(pos, POS_MIN)) != 0;
2210 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2211 pos = bkey_predecessor(iter, pos);
2212 bch2_btree_iter_set_pos(iter, pos);
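/*
 * Scan this transaction's pending updates for the first update to the
 * iterator's btree at or after the iterator's position, so peek can merge
 * uncommitted updates with what's in the btree.
 */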
2217 struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
2219 struct btree_insert_entry *i;
2220 struct bkey_i *ret = NULL;
2222 trans_for_each_update(iter->trans, i) {
2223 if (i->btree_id < iter->btree_id)
2225 if (i->btree_id > iter->btree_id)
2227 if (bpos_cmp(i->k->k.p, iter->path->pos) < 0)
2229 if (i->key_cache_already_flushed)
2231 if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
2238 static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
2240 return iter->flags & BTREE_ITER_WITH_UPDATES
2241 ? __bch2_btree_trans_peek_updates(iter)
2246 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
2247 struct btree_iter *iter)
2249 struct bkey_i *k = bch2_journal_keys_peek_slot(trans->c, iter->btree_id,
2255 return bkey_i_to_s_c(k);
2257 return bkey_s_c_null;
2262 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2263 struct btree_iter *iter,
2266 struct bkey_i *next_journal =
2267 bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
2269 k.k ? k.k->p : iter->path->l[0].b->key.k.p);
2272 iter->k = next_journal->k;
2273 k = bkey_i_to_s_c(next_journal);
2280 * Checks btree key cache for key at iter->pos and returns it if present, or bkey_s_c_null if not:
2284 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2286 struct btree_trans *trans = iter->trans;
2287 struct bch_fs *c = trans->c;
2291 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2292 return bkey_s_c_null;
2294 if (!iter->key_cache_path)
2295 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2296 iter->flags & BTREE_ITER_INTENT, 0,
2297 iter->flags|BTREE_ITER_CACHED);
2299 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2300 iter->flags & BTREE_ITER_INTENT);
2302 ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2304 return bkey_s_c_err(ret);
2306 iter->key_cache_path->should_be_locked = true;
2308 return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
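/*
 * __bch2_btree_iter_peek(): returns the first key >= search_key, merging in
 * journal keys, key cache entries and pending transaction updates as dictated
 * by the iterator flags, and skipping over whiteouts.
 */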
2311 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2313 struct btree_trans *trans = iter->trans;
2314 struct bkey_i *next_update;
2315 struct bkey_s_c k, k2;
2318 EBUG_ON(iter->path->cached || iter->path->level);
2319 bch2_btree_iter_verify(iter);
2322 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2323 iter->flags & BTREE_ITER_INTENT);
2325 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2326 if (unlikely(ret)) {
2327 /* ensure that iter->k is consistent with iter->pos: */
2328 bch2_btree_iter_set_pos(iter, iter->pos);
2329 k = bkey_s_c_err(ret);
2333 iter->path->should_be_locked = true;
2335 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2337 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2339 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2343 bch2_btree_iter_set_pos(iter, iter->pos);
2351 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2352 k = btree_trans_peek_journal(trans, iter, k);
2354 next_update = btree_trans_peek_updates(iter);
2357 bpos_cmp(next_update->k.p,
2358 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2359 iter->k = next_update->k;
2360 k = bkey_i_to_s_c(next_update);
2363 if (k.k && bkey_deleted(k.k)) {
2365 * If we've got a whiteout, and it's after the search
2366 * key, advance the search key to the whiteout instead
2367 * of just after the whiteout - it might be a btree
2368 * whiteout, with a real key at the same position, since
2369 * in the btree, deleted keys sort before non-deleted keys.
2371 search_key = bpos_cmp(search_key, k.k->p)
2373 : bpos_successor(k.k->p);
2379 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2380 /* Advance to next leaf node: */
2381 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2384 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2390 bch2_btree_iter_verify(iter);
2396 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2399 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2401 struct btree_trans *trans = iter->trans;
2402 struct bpos search_key = btree_iter_search_key(iter);
2404 struct bpos iter_pos;
2407 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2409 if (iter->update_path) {
2410 bch2_path_put(trans, iter->update_path,
2411 iter->flags & BTREE_ITER_INTENT);
2412 iter->update_path = NULL;
2415 bch2_btree_iter_verify_entry_exit(iter);
2418 k = __bch2_btree_iter_peek(iter, search_key);
2419 if (!k.k || bkey_err(k))
2423 * iter->pos should be monotonically increasing, and always be
2424 * equal to the key we just returned - except extents can
2425 * straddle iter->pos:
2427 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2429 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2430 iter_pos = bkey_start_pos(k.k);
2432 iter_pos = iter->pos;
2434 if (bkey_cmp(iter_pos, end) > 0) {
2435 bch2_btree_iter_set_pos(iter, end);
2440 if (iter->update_path &&
2441 bkey_cmp(iter->update_path->pos, k.k->p)) {
2442 bch2_path_put(trans, iter->update_path,
2443 iter->flags & BTREE_ITER_INTENT);
2444 iter->update_path = NULL;
2447 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2448 (iter->flags & BTREE_ITER_INTENT) &&
2449 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2450 !iter->update_path) {
2451 struct bpos pos = k.k->p;
2453 if (pos.snapshot < iter->snapshot) {
2454 search_key = bpos_successor(k.k->p);
2458 pos.snapshot = iter->snapshot;
2461 * advance, same as on exit for iter->path, but only up
2464 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2465 iter->update_path = iter->path;
2467 iter->update_path = bch2_btree_path_set_pos(trans,
2468 iter->update_path, pos,
2469 iter->flags & BTREE_ITER_INTENT);
2473 * We can never have a key in a leaf node at POS_MAX, so
2474 * we don't have to check these successor() calls:
2476 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2477 !bch2_snapshot_is_ancestor(trans->c,
2480 search_key = bpos_successor(k.k->p);
2484 if (bkey_whiteout(k.k) &&
2485 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2486 search_key = bkey_successor(iter, k.k->p);
2493 iter->pos = iter_pos;
2495 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2496 iter->flags & BTREE_ITER_INTENT);
2497 BUG_ON(!iter->path->nodes_locked);
2499 if (iter->update_path) {
2500 if (unlikely(!bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_))) {
2501 k = bkey_s_c_err(-EINTR);
2503 BUG_ON(!(iter->update_path->nodes_locked & 1));
2504 iter->update_path->should_be_locked = true;
2507 iter->path->should_be_locked = true;
2509 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2510 iter->pos.snapshot = iter->snapshot;
2512 ret = bch2_btree_iter_verify_ret(iter, k);
2513 if (unlikely(ret)) {
2514 bch2_btree_iter_set_pos(iter, iter->pos);
2515 k = bkey_s_c_err(ret);
2518 bch2_btree_iter_verify_entry_exit(iter);
2524 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2525 * to iterator's current position, returning keys from every level of the btree.
2526 * For keys at different levels of the btree that compare equal, the key from
2527 * the lower level (leaf) is returned first.
2529 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2531 struct btree_trans *trans = iter->trans;
2535 EBUG_ON(iter->path->cached);
2536 bch2_btree_iter_verify(iter);
2537 BUG_ON(iter->path->level < iter->min_depth);
2538 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2539 EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2542 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2543 iter->flags & BTREE_ITER_INTENT);
2545 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2546 if (unlikely(ret)) {
2547 /* ensure that iter->k is consistent with iter->pos: */
2548 bch2_btree_iter_set_pos(iter, iter->pos);
2549 k = bkey_s_c_err(ret);
2553 /* Already at end? */
2554 if (!btree_path_node(iter->path, iter->path->level)) {
2559 k = btree_path_level_peek_all(trans->c,
2560 &iter->path->l[iter->path->level], &iter->k);
2562 /* Check if we should go up to the parent node: */
2565 !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2566 iter->pos = path_l(iter->path)->b->key.k.p;
2567 btree_path_set_level_up(iter->path);
2568 iter->advanced = false;
2573 * Check if we should go back down to a leaf:
2574 * If we're not in a leaf node, we only return the current key
2575 * if it exactly matches iter->pos - otherwise we first have to
2576 * go back to the leaf:
2578 if (iter->path->level != iter->min_depth &&
2581 bpos_cmp(iter->pos, k.k->p))) {
2582 btree_path_set_level_down(trans, iter->path, iter->min_depth);
2583 iter->pos = bpos_successor(iter->pos);
2584 iter->advanced = false;
2588 /* Check if we should go to the next key: */
2589 if (iter->path->level == iter->min_depth &&
2592 !bpos_cmp(iter->pos, k.k->p)) {
2593 iter->pos = bpos_successor(iter->pos);
2594 iter->advanced = false;
2598 if (iter->advanced &&
2599 iter->path->level == iter->min_depth &&
2600 bpos_cmp(k.k->p, iter->pos))
2601 iter->advanced = false;
2603 BUG_ON(iter->advanced);
2610 iter->path->should_be_locked = true;
2611 bch2_btree_iter_verify(iter);
2617 * bch2_btree_iter_next: returns first key greater than iterator's current position
2620 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2622 if (!bch2_btree_iter_advance(iter))
2623 return bkey_s_c_null;
2625 return bch2_btree_iter_peek(iter);
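/*
 * Editor's sketch: an iterator isn't restricted to single steps like the
 * next()/prev() helpers - it can be repositioned arbitrarily with
 * bch2_btree_iter_set_pos(), and the following peek continues from the new
 * position. @iter is assumed to be an already-initialized, in-use iterator.
 */
static struct bkey_s_c __maybe_unused example_skip_to(struct btree_iter *iter,
						      struct bpos new_pos)
{
	bch2_btree_iter_set_pos(iter, new_pos);
	return bch2_btree_iter_peek(iter);
}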
2629 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2630 * iterator's current position
2632 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2634 struct btree_trans *trans = iter->trans;
2635 struct bpos search_key = iter->pos;
2636 struct btree_path *saved_path = NULL;
2638 struct bkey saved_k;
2639 const struct bch_val *saved_v;
2642 EBUG_ON(iter->path->cached || iter->path->level);
2643 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2645 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2646 return bkey_s_c_err(-EIO);
2648 bch2_btree_iter_verify(iter);
2649 bch2_btree_iter_verify_entry_exit(iter);
2651 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2652 search_key.snapshot = U32_MAX;
2655 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2656 iter->flags & BTREE_ITER_INTENT);
2658 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2659 if (unlikely(ret)) {
2660 /* ensure that iter->k is consistent with iter->pos: */
2661 bch2_btree_iter_set_pos(iter, iter->pos);
2662 k = bkey_s_c_err(ret);
2666 k = btree_path_level_peek(trans, iter->path,
2667 &iter->path->l[0], &iter->k);
2669 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2670 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2671 : bpos_cmp(k.k->p, search_key) > 0))
2672 k = btree_path_level_prev(trans, iter->path,
2673 &iter->path->l[0], &iter->k);
2676 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2677 if (k.k->p.snapshot == iter->snapshot)
2681 * If we have a saved candidate, and we're no
2682 * longer at the same _key_ (not pos), return
2685 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2686 bch2_path_put(trans, iter->path,
2687 iter->flags & BTREE_ITER_INTENT);
2688 iter->path = saved_path;
2695 if (bch2_snapshot_is_ancestor(iter->trans->c,
2699 bch2_path_put(trans, saved_path,
2700 iter->flags & BTREE_ITER_INTENT);
2701 saved_path = btree_path_clone(trans, iter->path,
2702 iter->flags & BTREE_ITER_INTENT);
2707 search_key = bpos_predecessor(k.k->p);
2711 if (bkey_whiteout(k.k) &&
2712 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2713 search_key = bkey_predecessor(iter, k.k->p);
2714 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2715 search_key.snapshot = U32_MAX;
2720 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2721 /* Advance to previous leaf node: */
2722 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2724 /* Start of btree: */
2725 bch2_btree_iter_set_pos(iter, POS_MIN);
2731 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2733 /* Extents can straddle iter->pos: */
2734 if (bkey_cmp(k.k->p, iter->pos) < 0)
2737 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2738 iter->pos.snapshot = iter->snapshot;
2741 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2742 iter->path->should_be_locked = true;
2744 bch2_btree_iter_verify_entry_exit(iter);
2745 bch2_btree_iter_verify(iter);
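/*
 * Editor's illustrative sketch: walking a btree backwards from @start with
 * bch2_btree_iter_peek_prev()/bch2_btree_iter_prev(). process_key() is a
 * hypothetical callback; -EINTR is returned for the caller's restart loop.
 */
static int __maybe_unused example_walk_keys_reverse(struct btree_trans *trans,
						    enum btree_id btree_id,
						    struct bpos start,
						    int (*process_key)(struct bkey_s_c))
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, start, 0);

	for (k = bch2_btree_iter_peek_prev(&iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_prev(&iter)) {
		ret = process_key(k);
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}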
2751 * bch2_btree_iter_prev: returns first key less than iterator's current position
2754 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2756 if (!bch2_btree_iter_rewind(iter))
2757 return bkey_s_c_null;
2759 return bch2_btree_iter_peek_prev(iter);
2762 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2764 struct btree_trans *trans = iter->trans;
2765 struct bpos search_key;
2769 bch2_btree_iter_verify(iter);
2770 bch2_btree_iter_verify_entry_exit(iter);
2771 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2772 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2774 /* extents can't span inode numbers: */
2775 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2776 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2777 if (iter->pos.inode == KEY_INODE_MAX)
2778 return bkey_s_c_null;
2780 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2783 search_key = btree_iter_search_key(iter);
2784 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2785 iter->flags & BTREE_ITER_INTENT);
2787 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2789 return bkey_s_c_err(ret);
2791 if ((iter->flags & BTREE_ITER_CACHED) ||
2792 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2793 struct bkey_i *next_update;
2795 if ((next_update = btree_trans_peek_updates(iter)) &&
2796 !bpos_cmp(next_update->k.p, iter->pos)) {
2797 iter->k = next_update->k;
2798 k = bkey_i_to_s_c(next_update);
2802 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2803 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2806 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2807 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2813 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2817 EBUG_ON(iter->path->level);
2819 if (iter->flags & BTREE_ITER_INTENT) {
2820 struct btree_iter iter2;
2821 struct bpos end = iter->pos;
2823 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2824 end.offset = U64_MAX;
2826 bch2_trans_copy_iter(&iter2, iter);
2827 k = bch2_btree_iter_peek_upto(&iter2, end);
2829 if (k.k && !bkey_err(k)) {
2833 bch2_trans_iter_exit(trans, &iter2);
2835 struct bpos pos = iter->pos;
2837 k = bch2_btree_iter_peek(iter);
2841 if (unlikely(bkey_err(k)))
2844 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2846 if (bkey_cmp(iter->pos, next) < 0) {
2847 bkey_init(&iter->k);
2848 iter->k.p = iter->pos;
2850 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2851 bch2_key_resize(&iter->k,
2852 min_t(u64, KEY_SIZE_MAX,
2853 (next.inode == iter->pos.inode
2857 EBUG_ON(!iter->k.size);
2860 k = (struct bkey_s_c) { &iter->k, NULL };
2864 iter->path->should_be_locked = true;
2866 bch2_btree_iter_verify_entry_exit(iter);
2867 bch2_btree_iter_verify(iter);
2868 ret = bch2_btree_iter_verify_ret(iter, k);
2870 return bkey_s_c_err(ret);
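/*
 * Editor's sketch of a point lookup with bch2_btree_iter_peek_slot(): unlike
 * peek(), it always produces a key for the iterator's position - a real key
 * if one exists, otherwise a synthesized deleted key (or a hole extent on
 * extents btrees). Snapshot handling is glossed over; @pos is assumed to
 * already carry the right snapshot ID where that matters.
 */
static int __maybe_unused example_key_exists(struct btree_trans *trans,
					     enum btree_id btree_id,
					     struct bpos pos, bool *exists)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree_id, pos, 0);

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (!ret)
		*exists = k.k && k.k->type != KEY_TYPE_deleted;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}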
2875 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2877 if (!bch2_btree_iter_advance(iter))
2878 return bkey_s_c_null;
2880 return bch2_btree_iter_peek_slot(iter);
2883 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2885 if (!bch2_btree_iter_rewind(iter))
2886 return bkey_s_c_null;
2888 return bch2_btree_iter_peek_slot(iter);
2891 /* new transactional stuff: */
2893 #ifdef CONFIG_BCACHEFS_DEBUG
2894 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2896 struct btree_path *path;
2899 BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated));
2901 trans_for_each_path(trans, path) {
2902 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2903 BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2906 for (i = 0; i < trans->nr_sorted; i++) {
2907 unsigned idx = trans->sorted[i];
2909 EBUG_ON(!(trans->paths_allocated & (1ULL << idx)));
2910 BUG_ON(trans->paths[idx].sorted_idx != i);
2914 static void btree_trans_verify_sorted(struct btree_trans *trans)
2916 struct btree_path *path, *prev = NULL;
2919 trans_for_each_path_inorder(trans, path, i) {
2920 if (prev && btree_path_cmp(prev, path) > 0) {
2921 bch2_dump_trans_paths_updates(trans);
2922 panic("trans paths out of order!\n");
2928 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2929 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2932 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2934 int i, l = 0, r = trans->nr_sorted, inc = 1;
2937 btree_trans_verify_sorted_refs(trans);
2939 if (trans->paths_sorted)
2943 * Cocktail shaker sort: this is efficient because iterators will be mostly sorted already.
2949 for (i = inc > 0 ? l : r - 2;
2950 i + 1 < r && i >= l;
2952 if (btree_path_cmp(trans->paths + trans->sorted[i],
2953 trans->paths + trans->sorted[i + 1]) > 0) {
2954 swap(trans->sorted[i], trans->sorted[i + 1]);
2955 trans->paths[trans->sorted[i]].sorted_idx = i;
2956 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2968 trans->paths_sorted = true;
2970 btree_trans_verify_sorted(trans);
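/*
 * Editor's stand-alone sketch of the cocktail shaker sort used above, on a
 * plain int array: bubble passes alternate direction and the sorted ends
 * shrink inwards, so a nearly-sorted input - which trans->sorted usually
 * is - finishes in very few passes.
 */
static void __maybe_unused example_shaker_sort(int *a, int n)
{
	int i, l = 0, r = n, inc = 1;
	bool swapped;

	do {
		swapped = false;

		for (i = inc > 0 ? l : r - 2;
		     i + 1 < r && i >= l;
		     i += inc)
			if (a[i] > a[i + 1]) {
				swap(a[i], a[i + 1]);
				swapped = true;
			}

		if (inc > 0)
			--r;	/* largest element now sits at the right end */
		else
			++l;	/* smallest element now sits at the left end */
		inc = -inc;
	} while (swapped);
}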
2973 static inline void btree_path_list_remove(struct btree_trans *trans,
2974 struct btree_path *path)
2978 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2979 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2981 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2982 trans->sorted + path->sorted_idx + 1,
2983 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
2985 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2987 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2988 trans->paths[trans->sorted[i]].sorted_idx = i;
2990 path->sorted_idx = U8_MAX;
2993 static inline void btree_path_list_add(struct btree_trans *trans,
2994 struct btree_path *pos,
2995 struct btree_path *path)
2999 path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
3001 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3002 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3003 trans->sorted + path->sorted_idx,
3004 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
3006 trans->sorted[path->sorted_idx] = path->idx;
3008 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
3011 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3012 trans->paths[trans->sorted[i]].sorted_idx = i;
3014 btree_trans_verify_sorted_refs(trans);
3017 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3020 bch2_path_put(trans, iter->path,
3021 iter->flags & BTREE_ITER_INTENT);
3022 if (iter->update_path)
3023 bch2_path_put(trans, iter->update_path,
3024 iter->flags & BTREE_ITER_INTENT);
3025 if (iter->key_cache_path)
3026 bch2_path_put(trans, iter->key_cache_path,
3027 iter->flags & BTREE_ITER_INTENT);
3029 iter->update_path = NULL;
3030 iter->key_cache_path = NULL;
3033 static void __bch2_trans_iter_init(struct btree_trans *trans,
3034 struct btree_iter *iter,
3035 enum btree_id btree_id, struct bpos pos,
3036 unsigned locks_want,
3040 EBUG_ON(trans->restarted);
3042 if (flags & BTREE_ITER_ALL_LEVELS)
3043 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
3045 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
3046 btree_node_type_is_extents(btree_id))
3047 flags |= BTREE_ITER_IS_EXTENTS;
3049 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
3050 !btree_type_has_snapshots(btree_id))
3051 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
3053 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
3054 btree_type_has_snapshots(btree_id))
3055 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
3057 if (trans->journal_replay_not_finished)
3058 flags |= BTREE_ITER_WITH_JOURNAL;
3060 iter->trans = trans;
3062 iter->update_path = NULL;
3063 iter->key_cache_path = NULL;
3064 iter->btree_id = btree_id;
3065 iter->min_depth = depth;
3066 iter->flags = flags;
3067 iter->snapshot = pos.snapshot;
3069 iter->k.type = KEY_TYPE_deleted;
3073 iter->path = bch2_path_get(trans, btree_id, iter->pos,
3074 locks_want, depth, flags);
3077 void bch2_trans_iter_init(struct btree_trans *trans,
3078 struct btree_iter *iter,
3079 unsigned btree_id, struct bpos pos,
3082 if (!btree_id_cached(trans->c, btree_id)) {
3083 flags &= ~BTREE_ITER_CACHED;
3084 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
3085 } else if (!(flags & BTREE_ITER_CACHED))
3086 flags |= BTREE_ITER_WITH_KEY_CACHE;
3088 __bch2_trans_iter_init(trans, iter, btree_id, pos,
3092 void bch2_trans_node_iter_init(struct btree_trans *trans,
3093 struct btree_iter *iter,
3094 enum btree_id btree_id,
3096 unsigned locks_want,
3100 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
3101 BTREE_ITER_NOT_EXTENTS|
3102 __BTREE_ITER_ALL_SNAPSHOTS|
3103 BTREE_ITER_ALL_SNAPSHOTS|
3105 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
3106 BUG_ON(iter->path->level != depth);
3107 BUG_ON(iter->min_depth != depth);
3110 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3114 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
3115 if (src->update_path)
3116 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
3117 dst->key_cache_path = NULL;
3120 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3122 size_t new_top = trans->mem_top + size;
3125 if (new_top > trans->mem_bytes) {
3126 size_t old_bytes = trans->mem_bytes;
3127 size_t new_bytes = roundup_pow_of_two(new_top);
3130 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3132 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
3133 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3134 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
3135 new_bytes = BTREE_TRANS_MEM_MAX;
3140 return ERR_PTR(-ENOMEM);
3142 trans->mem = new_mem;
3143 trans->mem_bytes = new_bytes;
3146 trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
3147 btree_trans_restart(trans);
3148 return ERR_PTR(-EINTR);
3152 p = trans->mem + trans->mem_top;
3153 trans->mem_top += size;
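/*
 * Editor's usage sketch for bch2_trans_kmalloc(): the memory is bump-allocated
 * out of trans->mem, is never kfree()d by the caller, and is reclaimed when
 * the transaction is reset or exits. The return value may be ERR_PTR(-ENOMEM),
 * or ERR_PTR(-EINTR) when growing the buffer forced a transaction restart, so
 * it must always be checked.
 */
static void * __maybe_unused example_trans_scratch(struct btree_trans *trans,
						   size_t bytes)
{
	void *p = bch2_trans_kmalloc(trans, bytes);

	if (IS_ERR(p))
		return p;	/* possibly -EINTR: caller restarts the transaction */

	memset(p, 0, bytes);	/* valid for the rest of this transaction */
	return p;
}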
3159 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3160 * @trans: transaction to reset
3162 * While iterating over or updating btree nodes, an attempt to lock a btree
3163 * node may return EINTR when the trylock fails. When this occurs
3164 * bch2_trans_begin() should be called and the transaction retried.
3166 void bch2_trans_begin(struct btree_trans *trans)
3168 struct btree_insert_entry *i;
3169 struct btree_path *path;
3171 trans_for_each_update(trans, i)
3172 __btree_path_put(i->path, true);
3174 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
3175 trans->extra_journal_res = 0;
3176 trans->nr_updates = 0;
3179 trans->hooks = NULL;
3180 trans->extra_journal_entries.nr = 0;
3182 if (trans->fs_usage_deltas) {
3183 trans->fs_usage_deltas->used = 0;
3184 memset((void *) trans->fs_usage_deltas +
3185 offsetof(struct replicas_delta_list, memset_start), 0,
3186 (void *) &trans->fs_usage_deltas->memset_end -
3187 (void *) &trans->fs_usage_deltas->memset_start);
3190 trans_for_each_path(trans, path) {
3191 path->should_be_locked = false;
3194 * If the transaction wasn't restarted, we're presuming to be
3195 * doing something new: don't keep iterators except the ones that
3196 * are in use - except for the subvolumes btree:
3198 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3199 path->preserve = false;
3202 * XXX: we probably shouldn't be doing this if the transaction
3203 * was restarted, but currently we still overflow transaction
3204 * iterators if we do that
3206 if (!path->ref && !path->preserve)
3207 __bch2_path_free(trans, path);
3209 path->preserve = false;
3212 bch2_trans_cond_resched(trans);
3214 if (trans->restarted)
3215 bch2_btree_path_traverse_all(trans);
3217 trans->restarted = false;
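/*
 * Editor's sketch of the retry pattern described above: on -EINTR the
 * transaction is reset with bch2_trans_begin() and the whole operation is
 * redone from the top. do_operation() stands in for any transactional helper
 * and is hypothetical.
 */
static int __maybe_unused example_trans_retry(struct btree_trans *trans,
					      int (*do_operation)(struct btree_trans *))
{
	int ret;

	do {
		bch2_trans_begin(trans);
		ret = do_operation(trans);
	} while (ret == -EINTR);

	return ret;
}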
3220 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3222 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
3223 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
3226 BUG_ON(trans->used_mempool);
3229 p = this_cpu_xchg(c->btree_paths_bufs->path , NULL);
3232 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3234 trans->paths = p; p += paths_bytes;
3235 trans->updates = p; p += updates_bytes;
3238 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3239 unsigned expected_nr_iters,
3240 size_t expected_mem_bytes,
3242 __acquires(&c->btree_trans_barrier)
3244 memset(trans, 0, sizeof(*trans));
3247 trans->journal_replay_not_finished =
3248 !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
3250 bch2_trans_alloc_paths(trans, c);
3252 if (expected_mem_bytes) {
3253 expected_mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3254 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3256 if (unlikely(!trans->mem)) {
3257 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3258 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3260 trans->mem_bytes = expected_mem_bytes;
3264 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3266 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
3267 trans->pid = current->pid;
3268 mutex_lock(&c->btree_trans_lock);
3269 list_add(&trans->list, &c->btree_trans_list);
3270 mutex_unlock(&c->btree_trans_lock);
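/*
 * Editor's sketch of the surrounding lifecycle: a transaction lives on the
 * caller's stack, is set up once, retried on -EINTR, and torn down with
 * bch2_trans_exit(). bch2_trans_init() is assumed here to be the header
 * wrapper that forwards to __bch2_trans_init() along with the caller's
 * function name; the body of the retry loop is left empty.
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	do {
		bch2_trans_begin(&trans);

		/* ... btree iteration and/or updates go here ... */
		ret = 0;
	} while (ret == -EINTR);

	bch2_trans_exit(&trans);
	return ret;
}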
3274 static void check_btree_paths_leaked(struct btree_trans *trans)
3276 #ifdef CONFIG_BCACHEFS_DEBUG
3277 struct bch_fs *c = trans->c;
3278 struct btree_path *path;
3280 trans_for_each_path(trans, path)
3285 bch_err(c, "btree paths leaked from %s!", trans->fn);
3286 trans_for_each_path(trans, path)
3288 printk(KERN_ERR " btree %s %pS\n",
3289 bch2_btree_ids[path->btree_id],
3290 (void *) path->ip_allocated);
3291 /* Be noisy about this: */
3292 bch2_fatal_error(c);
3296 void bch2_trans_exit(struct btree_trans *trans)
3297 __releases(&c->btree_trans_barrier)
3299 struct btree_insert_entry *i;
3300 struct bch_fs *c = trans->c;
3302 bch2_trans_unlock(trans);
3304 trans_for_each_update(trans, i)
3305 __btree_path_put(i->path, true);
3306 trans->nr_updates = 0;
3308 check_btree_paths_leaked(trans);
3310 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
3311 mutex_lock(&c->btree_trans_lock);
3312 list_del(&trans->list);
3313 mutex_unlock(&c->btree_trans_lock);
3316 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3318 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3320 kfree(trans->extra_journal_entries.data);
3322 if (trans->fs_usage_deltas) {
3323 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3324 REPLICAS_DELTA_LIST_MAX)
3325 mempool_free(trans->fs_usage_deltas,
3326 &c->replicas_delta_pool);
3328 kfree(trans->fs_usage_deltas);
3331 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3332 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3338 * Userspace doesn't have a real percpu implementation:
3340 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3344 mempool_free(trans->paths, &c->btree_paths_pool);
3346 trans->mem = (void *) 0x1;
3347 trans->paths = (void *) 0x1;
3350 static void __maybe_unused
3351 bch2_btree_path_node_to_text(struct printbuf *out,
3352 struct btree_bkey_cached_common *_b,
3355 pr_buf(out, " l=%u %s:",
3356 _b->level, bch2_btree_ids[_b->btree_id]);
3357 bch2_bpos_to_text(out, btree_node_pos(_b, cached));
3360 #ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
3361 static bool trans_has_locks(struct btree_trans *trans)
3363 struct btree_path *path;
3365 trans_for_each_path(trans, path)
3366 if (path->nodes_locked)
3372 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
3374 #ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
3375 struct btree_trans *trans;
3376 struct btree_path *path;
3378 static char lock_types[] = { 'r', 'i', 'w' };
3381 mutex_lock(&c->btree_trans_lock);
3382 list_for_each_entry(trans, &c->btree_trans_list, list) {
3383 if (!trans_has_locks(trans))
3386 pr_buf(out, "%i %s\n", trans->pid, trans->fn);
3388 trans_for_each_path(trans, path) {
3389 if (!path->nodes_locked)
3392 pr_buf(out, " path %u %c l=%u %s:",
3394 path->cached ? 'c' : 'b',
3396 bch2_btree_ids[path->btree_id]);
3397 bch2_bpos_to_text(out, path->pos);
3400 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3401 if (btree_node_locked(path, l)) {
3402 pr_buf(out, " %s l=%u ",
3403 btree_node_intent_locked(path, l) ? "i" : "r", l);
3404 bch2_btree_path_node_to_text(out,
3405 (void *) path->l[l].b,
3412 b = READ_ONCE(trans->locking);
3414 path = &trans->paths[trans->locking_path_idx];
3415 pr_buf(out, " locking path %u %c l=%u %c %s:",
3416 trans->locking_path_idx,
3417 path->cached ? 'c' : 'b',
3418 trans->locking_level,
3419 lock_types[trans->locking_lock_type],
3420 bch2_btree_ids[trans->locking_btree_id]);
3421 bch2_bpos_to_text(out, trans->locking_pos);
3423 pr_buf(out, " node ");
3424 bch2_btree_path_node_to_text(out,
3425 (void *) b, path->cached);
3429 mutex_unlock(&c->btree_trans_lock);
3433 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3435 if (c->btree_trans_barrier_initialized)
3436 cleanup_srcu_struct(&c->btree_trans_barrier);
3437 mempool_exit(&c->btree_trans_mem_pool);
3438 mempool_exit(&c->btree_paths_pool);
3441 int bch2_fs_btree_iter_init(struct bch_fs *c)
3443 unsigned nr = BTREE_ITER_MAX;
3446 INIT_LIST_HEAD(&c->btree_trans_list);
3447 mutex_init(&c->btree_trans_lock);
3449 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3450 sizeof(struct btree_path) * nr +
3451 sizeof(struct btree_insert_entry) * nr) ?:
3452 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3453 BTREE_TRANS_MEM_MAX) ?:
3454 init_srcu_struct(&c->btree_trans_barrier);
3456 c->btree_trans_barrier_initialized = true;