bcachefs: Simplify hash table checks
[linux-block.git] fs/bcachefs/btree_locking.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
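	/*
	 * Worked example (hypothetical state): an intent lock at this level
	 * has the bit set in both bitfields, giving -1 + 1 + 1 ==
	 * BTREE_NODE_INTENT_LOCKED; a read lock sets only nodes_locked,
	 * giving -1 + 1 + 0 == BTREE_NODE_READ_LOCKED.
	 */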
	return BTREE_NODE_UNLOCKED +
		((iter->nodes_locked >> level) & 1) +
		((iter->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_iter *iter,
					    unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
					  unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
	return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
					    unsigned level)
{
	iter->nodes_locked &= ~(1 << level);
	iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
					  unsigned level,
					  enum six_lock_type type)
{
	/*
	 * Relying on SIX_LOCK_read == 0 and SIX_LOCK_intent == 1, so that
	 * ORing the lock type into nodes_intent_locked needs no branch:
	 */
	BUILD_BUG_ON(SIX_LOCK_read != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	iter->nodes_locked |= 1 << level;
	iter->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
						 unsigned level)
{
	mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
	return level < iter->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

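/*
 * Which lock, if any, this level ought to hold: levels below the iterator's
 * current level stay unlocked, levels we want intent locks on (for updates)
 * are intent locked, and the iterator's own level is read locked at minimum:
 */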
static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
	if (level < iter->level)
		return BTREE_NODE_UNLOCKED;
	if (level < iter->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == iter->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(iter, level);
}

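/*
 * Drop all of this iterator's locks, flagging it as needing a relock before
 * it can be used again; __ffs() picks off the lowest locked level each time:
 */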
static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * wrapper around six locks that just traces lock contended time
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->c.lock, type, NULL, NULL);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

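/*
 * Try the lock first, so the uncontended case skips the clock reads in
 * __btree_node_lock_type() entirely:
 */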
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->c.lock, type))
		__btree_node_lock_type(c, b, type);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (iter->l[level].b == b &&
		    btree_node_locked_type(iter, level) >= want) {
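			/*
			 * Another iterator in this transaction already holds
			 * this node's lock at least as strongly as we want
			 * it: just take another reference on it.
			 */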
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
			    struct btree_iter *, enum six_lock_type,
			    six_lock_should_sleep_fn, void *,
			    unsigned long);

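/*
 * Three step lock: an uncontended trylock first, then piggybacking on a lock
 * another linked iterator already holds, then the full slow path:
 */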
static inline bool btree_node_lock(struct btree *b,
				   struct bpos pos, unsigned level,
				   struct btree_iter *iter,
				   enum six_lock_type type,
				   six_lock_should_sleep_fn should_sleep_fn, void *p,
				   unsigned long ip)
{
	struct btree_trans *trans = iter->trans;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));

	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
		__bch2_btree_node_lock(b, pos, level, iter, type,
				       should_sleep_fn, p, ip);
}

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));

	return likely(btree_node_locked(iter, level)) ||
		__bch2_btree_node_relock(iter, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock()
 * will succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

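	/*
	 * Taking the write lock bumped the node's sequence number by one
	 * (see the assertion above), and six_unlock_write() below bumps it
	 * again; advancing each linked iterator's saved seq by 2 keeps it
	 * matching the post-unlock value:
	 */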
	trans_for_each_iter_with_node(iter->trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(b, iter);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */