/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, idx);
	if (idx == BTREE_ITER_MAX)
		return NULL;
	EBUG_ON(idx > BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}

#define trans_for_each_path_from(_trans, _path, _start)			\
	for (_path = __trans_next_path((_trans), _start);		\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

#define trans_for_each_path(_trans, _path)				\
	trans_for_each_path_from(_trans, _path, 0)

static inline struct btree_path *
__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
{
	*idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, *idx);
	if (*idx == BTREE_ITER_MAX)
		return NULL;

	EBUG_ON(*idx > BTREE_ITER_MAX);
	return &trans->paths[*idx];
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, for bch2_btree_trans_to_text():
 */
#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)	\
	for (_idx = _start;						\
	     (_path = __trans_next_path_safe((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path_safe(_trans, _path, _idx)			\
	trans_for_each_path_safe_from(_trans, _path, _idx, 0)

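/*
 * Example (illustrative sketch): walking the paths of a btree_trans that may
 * belong to a running thread, as for debug output; 'out' (a printbuf) and
 * 'trans' are assumed to come from the caller:
 *
 *	struct btree_path *path;
 *	unsigned idx;
 *
 *	trans_for_each_path_safe(trans, path, idx)
 *		bch2_btree_path_to_text(out, path);
 */
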
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
					      bool, unsigned long);

static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	path->should_be_locked = false;
	return path;
}

struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			  struct bpos, bool, unsigned long, int);

static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			struct btree_path *path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, path->pos);

	return cmp
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *, struct btree_path *,
					      unsigned, unsigned long);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					struct btree_path *path, unsigned flags)
{
	if (path->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

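/*
 * Example (sketch, with a hypothetical mutex 'foo_lock'): taking an external
 * mutex from within a transaction. On contention __bch2_trans_mutex_lock()
 * may drop btree locks while waiting, so it can return a transaction restart
 * error, which must be checked like any other transaction error:
 *
 *	ret = bch2_trans_mutex_lock(trans, &c->foo_lock);
 *	if (ret)
 *		return ret;
 *	ret = do_work_under_lock(trans);
 *	mutex_unlock(&c->foo_lock);
 */
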
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size		= 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned locks_want,
					       unsigned depth,
					       unsigned flags,
					       unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= NULL;
	iter->key_cache_path	= NULL;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
				   enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	if (!iter->trans->restarted)
		iter->path->preserve = false;
}

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

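/*
 * Example (sketch): the usual pattern is allocating space for a new or
 * updated key within the current transaction; the memory is reclaimed when
 * the transaction is reset, and the result may be an error pointer:
 *
 *	struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
 *	int ret = PTR_ERR_OR_ZERO(new);
 *	if (ret)
 *		return ret;
 *	bkey_reassemble(new, k);
 */
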
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
						   struct btree_iter *iter,
						   unsigned btree_id, struct bpos pos,
						   unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
						 struct btree_iter *iter,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
				_btree_id, _pos, _flags, KEY_TYPE_##_type))

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
					    unsigned btree_id, struct bpos pos,
					    unsigned flags, unsigned type,
					    unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

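/*
 * Example (sketch, modelled on bch2_subvolume_get(); 'subvolid' is assumed
 * from the caller): look up one key, verify its type, and copy its value
 * into a stack buffer, zero-padding short values:
 *
 *	struct bch_subvolume s;
 *	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes,
 *					  POS(0, subvolid), BTREE_ITER_CACHED,
 *					  subvolume, &s);
 */
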
void bch2_trans_srcu_unlock(struct btree_trans *);
void bch2_trans_srcu_lock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

/*
 * XXX
 * this does not handle transaction restarts from bch2_btree_iter_next_node()
 * correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),	\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)

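/*
 * Example (sketch): walking every node of a btree for debug output; 'out'
 * (a printbuf) and 'c' are assumed from the caller, and 'ret' must be
 * checked once the loop terminates:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		bch2_btree_node_to_text(out, c, b);
 *	bch2_trans_iter_exit(trans, &iter);
 */
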
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX) > BTREE_ITER_MAX - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos end, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key2()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})

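/*
 * Example (sketch; 'pos' is assumed from the caller): retrying a single
 * lookup until it completes without a transaction restart;
 * BCH_ERR_transaction_restart never escapes lockrestart_do():
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	ret = lockrestart_do(trans,
 *		bkey_err(k = bch2_bkey_get_iter(trans, &iter,
 *						BTREE_ID_inodes, pos, 0)));
 *	if (!ret)
 *		bch2_trans_iter_exit(trans, &iter);
 */
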
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *    transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _restart_count);		\
})

#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
				 _start, _end, _flags, _k, _do)		\
({									\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key2(_trans, _iter, _btree_id,			\
			    _start, _flags, _k, _do)			\
	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

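/*
 * Example (sketch; 'nr_extents' is a hypothetical counter): scanning a
 * btree. The loop stops on the first error, so '_ret' must be checked
 * afterwards, and the iterator must still be exited:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
 *			   BTREE_ITER_PREFETCH, k, ret)
 *		nr_extents++;
 *	bch2_trans_iter_exit(trans, &iter);
 */
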
#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),	\
						&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	_do ?: bch2_trans_relock(_trans);				\
})

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})

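/*
 * Example (sketch, modelled on the key cache code): try an allocation with
 * GFP_NOWAIT while btree locks are held, falling back to GFP_KERNEL with
 * locks dropped; the expression must allocate with '_gfp' for the fallback
 * to take effect:
 *
 *	int ret = 0;
 *	struct bkey_cached *ck = allocate_dropping_locks(trans, ret,
 *			kmem_cache_zalloc(bch2_key_cache, _gfp));
 *	if (ret)
 *		return ret;
 */
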
/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

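/*
 * Example (sketch; bch2_do_something() is a hypothetical transaction
 * helper): the usual transaction lifecycle. The first call from a given
 * function registers __func__ for transaction statistics:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *
 *	ret = lockrestart_do(trans, bch2_do_something(trans));
 *	bch2_trans_put(trans);
 */
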
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */