/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks which locks it wants to take and which locks it
 * currently holds - these are the wrappers for locking/unlocking btree nodes
 * and updating the iterator state.
 */

#include "btree_iter.h"
#include "btree_io.h"
#include "six.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if a node's bit is set in
	 * nodes_intent_locked, it must also be set in nodes_locked, so we
	 * can compute the lock type without branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((iter->nodes_locked >> level) & 1) +
		((iter->nodes_intent_locked >> level) & 1);
}
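
/*
 * Lock state encoding, per level (the 0/1 combination cannot happen, since
 * an intent lock is always also counted in nodes_locked):
 *
 *	nodes_locked bit   nodes_intent_locked bit   lock state
 *	       0                     0               BTREE_NODE_UNLOCKED      (-1)
 *	       1                     0               BTREE_NODE_READ_LOCKED    (0)
 *	       1                     1               BTREE_NODE_INTENT_LOCKED  (1)
 */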

static inline bool btree_node_intent_locked(struct btree_iter *iter,
					    unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
					  unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
	return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
					    unsigned level)
{
	iter->nodes_locked &= ~(1 << level);
	iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	iter->nodes_locked |= 1 << level;
	iter->nodes_intent_locked |= type << level;
}
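
/*
 * Example: because SIX_LOCK_read == 0 and SIX_LOCK_intent == 1 (enforced by
 * the BUILD_BUG_ON()s above), "type << level" sets the nodes_intent_locked
 * bit only for intent locks - marking a read lock at level 2 does
 * nodes_locked |= 0b100 and leaves nodes_intent_locked unchanged.
 */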

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
						 unsigned level)
{
	mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
	return level < iter->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
	if (level < iter->level)
		return BTREE_NODE_UNLOCKED;
	if (level < iter->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == iter->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
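
/*
 * Worked example: with iter->level == 0 and iter->locks_want == 2, we want
 * intent locks on levels 0 and 1 and no locks above that; with
 * locks_want == 0 we want only a read lock on the node at iter->level.
 * Levels below iter->level are never locked.
 */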

static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->l[level].b->lock, lock_type);
	mark_btree_node_unlocked(iter, level);
}

static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));
}
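
/*
 * Example: if nodes_locked == 0b101 (levels 0 and 2 locked), the loop above
 * unlocks level 0 first (__ffs() returns the lowest set bit), then level 2;
 * btree_node_unlock() clears each level's bits, so the loop terminates.
 */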

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * Wrapper around the six lock calls that records how long we spent waiting
 * on a contended lock:
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->lock, type, NULL, NULL);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

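/*
 * Fast path: try the lock first; only a contended acquisition goes through
 * the timed slow path above, so uncontended locking never touches the clock
 * or the time stats.
 */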
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->lock, type))
		__btree_node_lock_type(c, b, type);
}

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
			    struct btree_iter *, enum six_lock_type, bool);

static inline bool btree_node_lock(struct btree *b, struct bpos pos,
				   unsigned level,
				   struct btree_iter *iter,
				   enum six_lock_type type,
				   bool may_drop_locks)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);

	return likely(six_trylock_type(&b->lock, type)) ||
		__bch2_btree_node_lock(b, pos, level, iter,
				       type, may_drop_locks);
}
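
/*
 * Illustrative use (a sketch, not verbatim kernel code): take a read lock
 * on @b at @level and record it in the iterator on success:
 *
 *	if (btree_node_lock(b, pos, level, iter, SIX_LOCK_read, true))
 *		mark_btree_node_locked(iter, level, SIX_LOCK_read);
 */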

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));

	return likely(btree_node_locked(iter, level)) ||
		__bch2_btree_node_relock(iter, level);
}

bool bch2_btree_iter_relock(struct btree_iter *);

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	EBUG_ON(iter->l[b->level].b != b);
	EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);

	if (!six_trylock_write(&b->lock))
		__bch2_btree_node_lock_write(b, iter);
}
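
/*
 * Note: write locks are only taken on nodes we already have intent locked
 * (in the six lock model, readers can run alongside an intent holder, but a
 * write lock excludes everyone) - the EBUG_ON()s above verify that the
 * iterator still points at @b and that b's lock sequence number matches
 * what we recorded when the node was locked.
 */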

#endif /* _BCACHEFS_BTREE_LOCKING_H */