bcachefs: make struct btree_iter a bit smaller
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "btree_types.h"

static inline void btree_iter_set_dirty(struct btree_iter *iter,
					enum btree_iter_uptodate u)
{
	iter->uptodate = max_t(unsigned, iter->uptodate, u);
}

static inline struct btree *btree_iter_node(struct btree_iter *iter,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

static inline struct btree *btree_node_parent(struct btree_iter *iter,
					      struct btree *b)
{
	return btree_iter_node(iter, b->level + 1);
}

static inline bool btree_iter_linked(const struct btree_iter *iter)
{
	return iter->next != iter;
}

static inline bool __iter_has_node(const struct btree_iter *iter,
				   const struct btree *b)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @iter might have taken a write lock on @b, and we don't want to skip
	 * the linked iterator if the sequence numbers were equal before taking
	 * that write lock. The lock sequence number is incremented by taking
	 * and releasing write locks and is even when unlocked:
	 */

	return iter->l[b->level].b == b &&
		iter->l[b->level].lock_seq >> 1 == b->lock.state.seq >> 1;
}
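
/*
 * Worked example (illustrative only, not from the original source): the lock
 * sequence number is even when unlocked and odd while a write lock is held.
 * If @iter observed lock_seq == 4 and then took a write lock on @b, bumping
 * b->lock.state.seq to 5, then 4 >> 1 == 5 >> 1 and __iter_has_node() still
 * considers @iter to have a valid reference to @b.
 */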

static inline struct btree_iter *
__next_linked_iter(struct btree_iter *iter, struct btree_iter *linked)
{
	return linked->next != iter ? linked->next : NULL;
}

static inline struct btree_iter *
__next_iter_with_node(struct btree_iter *iter, struct btree *b,
		      struct btree_iter *linked)
{
	while (linked && !__iter_has_node(linked, b))
		linked = __next_linked_iter(iter, linked);

	return linked;
}

/**
 * for_each_btree_iter - iterate over all iterators linked with @_iter,
 * including @_iter
 */
#define for_each_btree_iter(_iter, _linked)				\
	for ((_linked) = (_iter); (_linked);				\
	     (_linked) = __next_linked_iter(_iter, _linked))

/**
 * for_each_btree_iter_with_node - iterate over all iterators linked with @_iter
 * that also point to @_b
 *
 * @_b is assumed to be locked by @_iter
 *
 * Filters out iterators that don't have a valid btree_node iterator for @_b -
 * i.e. iterators for which bch2_btree_node_relock() would not succeed.
 */
#define for_each_btree_iter_with_node(_iter, _b, _linked)		\
	for ((_linked) = (_iter);					\
	     ((_linked) = __next_iter_with_node(_iter, _b, _linked));	\
	     (_linked) = __next_linked_iter(_iter, _linked))

/**
 * for_each_linked_btree_iter - iterate over all iterators linked with @_iter,
 * _not_ including @_iter
 */
#define for_each_linked_btree_iter(_iter, _linked)			\
	for ((_linked) = (_iter)->next;					\
	     (_linked) != (_iter);					\
	     (_linked) = (_linked)->next)
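
/*
 * Illustrative sketch (hypothetical caller, for illustration only): code that
 * modifies a node typically walks the iterators that hold it locked so each
 * one can be fixed up; the node_iter, bset_tree and key arguments below are
 * placeholders:
 *
 *	for_each_btree_iter_with_node(iter, b, linked)
 *		bch2_btree_node_iter_fix(linked, b, node_iter, t,
 *					 where, clobber_u64s, new_u64s);
 */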

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
void bch2_btree_iter_verify_locks(struct btree_iter *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
					  struct btree *b) {}
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif

void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
			      struct btree_node_iter *, struct bset_tree *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_iter_unlock(struct btree_iter *);

bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);

static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
					   unsigned new_locks_want,
					   bool may_drop_locks)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return iter->locks_want < new_locks_want
		? (may_drop_locks
		   ? __bch2_btree_iter_upgrade(iter, new_locks_want)
		   : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
		: iter->uptodate <= BTREE_ITER_NEED_PEEK;
}
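
/*
 * Illustrative caller sketch (hypothetical, for illustration only): an update
 * that needs intent locks on the leaf and its parent upgrades first, and
 * restarts the operation if the locks could not be taken:
 *
 *	if (!bch2_btree_iter_upgrade(iter, 2, true))
 *		return -EINTR;
 */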

void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);

static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
	if (iter->locks_want > ((iter->flags & BTREE_ITER_INTENT) ? 1 : 0))
		__bch2_btree_iter_downgrade(iter, 0);
}

void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);

void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
			    enum btree_id, struct bpos,
			    unsigned, unsigned, unsigned);

static inline void bch2_btree_iter_init(struct btree_iter *iter,
			struct bch_fs *c, enum btree_id btree_id,
			struct bpos pos, unsigned flags)
{
	__bch2_btree_iter_init(iter, c, btree_id, pos,
			       flags & BTREE_ITER_INTENT ? 1 : 0, 0,
			       (btree_id == BTREE_ID_EXTENTS
				? BTREE_ITER_IS_EXTENTS : 0)|flags);
}

void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
void bch2_btree_iter_unlink(struct btree_iter *);
void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);

static inline struct bpos btree_type_successor(enum btree_id id,
					       struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		pos.inode++;
		pos.offset = 0;
	} else if (id != BTREE_ID_EXTENTS) {
		pos = bkey_successor(pos);
	}

	return pos;
}

static inline struct bpos btree_type_predecessor(enum btree_id id,
						 struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		--pos.inode;
		pos.offset = 0;
	} else /* if (id != BTREE_ID_EXTENTS) */ {
		pos = bkey_predecessor(pos);
	}

	return pos;
}
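
/*
 * Illustrative examples (not from the original source):
 *
 *	btree_type_successor(BTREE_ID_INODES,  POS(5, 123)) == POS(6, 0)
 *	btree_type_successor(BTREE_ID_EXTENTS, pos)         == pos
 *	btree_type_successor(BTREE_ID_DIRENTS, POS(5, 123)) == POS(5, 124)
 */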

static inline int __btree_iter_cmp(enum btree_id id,
				   struct bpos pos,
				   const struct btree_iter *r)
{
	if (id != r->btree_id)
		return id < r->btree_id ? -1 : 1;
	return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
				 const struct btree_iter *r)
{
	return __btree_iter_cmp(l->btree_id, l->pos, r);
}

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
	if (need_resched()) {
		bch2_btree_iter_unlock(iter);
		schedule();
	} else if (race_fault()) {
		bch2_btree_iter_unlock(iter);
	}
}

#define __for_each_btree_node(_iter, _c, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b)		\
	for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), _start,	\
				    _locks_want, _depth,		\
				    _flags|BTREE_ITER_NODES),		\
	     _b = bch2_btree_iter_peek_node(_iter);			\
	     (_b);							\
	     (_b) = bch2_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_iter, _c, _btree_id, _start, _flags, _b)	\
	__for_each_btree_node(_iter, _c, _btree_id, _start, 0, 0, _flags, _b)
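
/*
 * Illustrative usage sketch (hypothetical caller, for illustration only):
 * walk every node of the extents btree, checking for errors at the end:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, b) {
 *		// inspect b
 *	}
 *	ret = bch2_btree_iter_unlock(&iter);
 */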

static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
						     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_peek_slot(iter)
		: bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
						     unsigned flags)
{
	bch2_btree_iter_cond_resched(iter);

	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_next_slot(iter)
		: bch2_btree_iter_next(iter);
}

#define for_each_btree_key(_iter, _c, _btree_id, _start, _flags, _k)	\
	for (bch2_btree_iter_init((_iter), (_c), (_btree_id),		\
				  (_start), (_flags)),			\
	     (_k) = __bch2_btree_iter_peek(_iter, _flags);		\
	     !IS_ERR_OR_NULL((_k).k);					\
	     (_k) = __bch2_btree_iter_next(_iter, _flags))

#define for_each_btree_key_continue(_iter, _flags, _k)			\
	for ((_k) = __bch2_btree_iter_peek(_iter, _flags);		\
	     !IS_ERR_OR_NULL((_k).k);					\
	     (_k) = __bch2_btree_iter_next(_iter, _flags))

static inline int btree_iter_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}
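
/*
 * Illustrative usage sketch (hypothetical caller, for illustration only):
 * iterate over every key in the extents btree; the loop stops at the end of
 * the btree or on an error, so check with btree_iter_err() afterwards:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k) {
 *		// use k.k (the key) and k.v (the value)
 *	}
 *	ret = btree_iter_err(k) ?: bch2_btree_iter_unlock(&iter);
 */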

/* new multiple iterator interface: */

int bch2_trans_preload_iters(struct btree_trans *);
void bch2_trans_iter_free(struct btree_trans *,
			  struct btree_iter *);

struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
					 struct bpos, unsigned, u64);
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
					  struct btree_iter *, u64);

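/*
 * Build a 64 bit iterator id from the callsite: the low 32 bits of the
 * caller's return address end up in the high word and the low 32 bits of the
 * current instruction pointer in the low word, so iterators can be told apart
 * by the code location that created them.
 */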
static __always_inline u64 __btree_iter_id(void)
{
	u64 ret = 0;

	ret <<= 32;
	ret |= _RET_IP_ & U32_MAX;
	ret <<= 32;
	ret |= _THIS_IP_ & U32_MAX;
	return ret;
}

static __always_inline struct btree_iter *
bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
		    struct bpos pos, unsigned flags)
{
	return __bch2_trans_get_iter(trans, btree_id, pos, flags,
				     __btree_iter_id());
}

static __always_inline struct btree_iter *
bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
{
	return __bch2_trans_copy_iter(trans, src, __btree_iter_id());
}

void __bch2_trans_begin(struct btree_trans *);

void *bch2_trans_kmalloc(struct btree_trans *, size_t);
int bch2_trans_unlock(struct btree_trans *);
void bch2_trans_init(struct btree_trans *, struct bch_fs *);
int bch2_trans_exit(struct btree_trans *);

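/*
 * Illustrative lifecycle sketch (hypothetical caller, for illustration only):
 * a transaction owns its iterators, which are looked up by callsite id and
 * released when the transaction exits:
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c);
 *
 *	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, pos,
 *				   BTREE_ITER_INTENT);
 *	// ... traverse/peek via iter; on a transaction restart, call
 *	// bch2_trans_begin() and retry ...
 *
 *	ret = bch2_trans_exit(&trans);
 */
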
#ifdef TRACE_TRANSACTION_RESTARTS
#define bch2_trans_begin(_trans)					\
do {									\
	if (is_power_of_2((_trans)->nr_restarts) &&			\
	    (_trans)->nr_restarts >= 8)					\
		pr_info("nr restarts: %zu", (_trans)->nr_restarts);	\
									\
	(_trans)->nr_restarts++;					\
	__bch2_trans_begin(_trans);					\
} while (0)
#else
#define bch2_trans_begin(_trans) __bch2_trans_begin(_trans)
#endif

#ifdef TRACE_TRANSACTION_RESTARTS_ALL
#define trans_restart(...) pr_info("transaction restart" __VA_ARGS__)
#else
#define trans_restart(...) no_printk("transaction restart" __VA_ARGS__)
#endif

#endif /* _BCACHEFS_BTREE_ITER_H */