/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
#else
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}
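
/*
 * A worked example of the encoding above (illustrative, added commentary):
 * nodes_locked packs two bits per level, storing the lock type + 1. An
 * intent lock on level 0 plus a read lock on level 1 gives:
 *
 *	(SIX_LOCK_intent + 1) << (0 << 1) == 0x2
 *	(SIX_LOCK_read   + 1) << (1 << 1) == 0x4
 *	nodes_locked			  == 0x6
 *
 * btree_node_locked_type() recovers the type by undoing the +1 bias.
 */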

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}
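
/*
 * Added note: the +1 bias makes BTREE_NODE_UNLOCKED (-1) encode as 0, so a
 * path holding no locks has nodes_locked == 0 - which is what lets
 * __bch2_btree_path_unlock() below simply loop while (path->nodes_locked).
 */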

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
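
/*
 * Illustration (added commentary): with path->level == 0 and locks_want == 2,
 * btree_lock_want() returns INTENT for levels 0 and 1 and UNLOCKED for
 * level 2 and above; the path's own level only gets a READ lock when it
 * isn't already covered by locks_want.
 */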

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	if (s)
		__bch2_time_stats_update(&s->lock_hold_times,
					 path->l[level].lock_taken_time,
					 local_clock());
#endif
}
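
/*
 * Added note: this is the read side of the lock_taken_time stamp recorded in
 * mark_btree_node_locked() and btree_node_lock(); together they measure how
 * long each node lock was held, when lock time stats are enabled.
 */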

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;
	unsigned i;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked, i)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}
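
/*
 * Added note (from reading six.c, may lag the source): releasing the write
 * lock advances the six lock's sequence number, so the lock_seq cached in
 * every path pointing at this node is bumped first to match - otherwise a
 * later bch2_btree_node_relock() on those paths would spuriously fail even
 * though only this transaction modified the node.
 */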

void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type,
					 bool lock_may_not_fail,
					 unsigned long ip)
{
	int ret;

	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				 bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);
	return ret;
}
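
/*
 * Added note: trans->locking and trans->locking_wait advertise which lock
 * this transaction is blocked on; the bch2_six_check_for_deadlock() callback
 * walks that state across transactions to find lock cycles and picks a
 * victim to abort instead of deadlocking.
 */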

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
					 struct btree_bkey_cached_common *b,
					 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_bkey_cached_common *b,
				  unsigned level,
				  enum six_lock_type type,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!test_bit(path->idx, trans->paths_allocated));

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}
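
/*
 * Added note: the three arms of the condition above are ordered fast path
 * first - an uncontended trylock, then piggybacking on a lock some other path
 * in this transaction already holds, and only then the slow path through the
 * deadlock cycle detector, the only arm that can fail and restart the
 * transaction.
 */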

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *,
				      struct btree_path *, unsigned long);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
			  struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

struct get_locks_fail {
	unsigned	l;
	struct btree	*b;
};

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f;
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->uptodate == BTREE_ITER_UPTODATE)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
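
/*
 * Hypothetical usage sketch (added, not from the original source): a caller
 * that needs intent locks on levels 0 and 1, e.g. in anticipation of a node
 * split, would do something like:
 *
 *	int ret = bch2_btree_path_upgrade(trans, path, 2);
 *	if (ret)
 *		return ret;
 *
 * letting the transaction restart machinery rewind and retry on
 * transaction_restart_upgrade errors.
 */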

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
				    struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}
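
/*
 * Added note: the ERR_PTR sentinel left in path->l[l].b marks the level as
 * no longer pointing at a real node - is_btree_node() above treats it as
 * absent - and the path is flagged to be re-traversed before its next use.
 */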

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
				struct btree_path *,
				struct btree_bkey_cached_common *b,
				unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */