/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_SIX_H
#define _LINUX_SIX_H

/*
 * Shared/intent/exclusive locks: sleepable read/write locks, much like rw
 * semaphores, except with a third intermediate state, intent. Basic operations
 * are (the blocking lock calls also take a six_lock_should_sleep_fn callback
 * and argument, declared below; these are elided here for brevity):
 *
 * six_lock_read(&foo->lock);
 * six_unlock_read(&foo->lock);
 *
 * six_lock_intent(&foo->lock);
 * six_unlock_intent(&foo->lock);
 *
 * six_lock_write(&foo->lock);
 * six_unlock_write(&foo->lock);
 *
 * Intent locks block other intent locks, but do not block read locks, and you
 * must have an intent lock held before taking a write lock, like so:
 *
 * six_lock_intent(&foo->lock);
 * six_lock_write(&foo->lock);
 * six_unlock_write(&foo->lock);
 * six_unlock_intent(&foo->lock);
 *
 * Other operations:
 *
 * six_trylock_read()
 * six_trylock_intent()
 * six_trylock_write()
 *
 * six_lock_downgrade(): convert from intent to read
 * six_lock_tryupgrade(): attempt to convert from read to intent
 *
 * Locks also embed a sequence number, which is incremented when the lock is
 * locked or unlocked for write. The current sequence number can be grabbed
 * while a lock is held from lock->state.seq; then, if you drop the lock you
 * can use six_relock_(read|intent|write)(lock, seq) to attempt to retake the
 * lock iff it hasn't been locked for write in the meantime.
 *
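 * For example, a sketch of the relock idiom (foo is a hypothetical structure
 * embedding a six_lock):
 *
 *	six_lock_read(&foo->lock, NULL, NULL);
 *	u32 seq = foo->lock.state.seq;
 *	six_unlock_read(&foo->lock);
 *
 *	if (!six_relock_read(&foo->lock, seq))
 *		... the lock was taken for write in the meantime; start over
 *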
 * There are also operations that take the lock type as a parameter, where the
 * type is one of SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write:
 *
 * six_lock_type(lock, type, should_sleep_fn, p)
 * six_unlock_type(lock, type)
 * six_relock_type(lock, type, seq)
 * six_trylock_type(lock, type)
 * six_trylock_convert(lock, from, to)
 *
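 * For example, taking and dropping a lock whose type is chosen at runtime (a
 * sketch; passing a NULL should_sleep_fn simply blocks until the lock is
 * taken):
 *
 *	enum six_lock_type type = SIX_LOCK_intent;
 *
 *	six_lock_type(&foo->lock, type, NULL, NULL);
 *	six_unlock_type(&foo->lock, type);
 *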
 * A lock may be held multiple times by the same thread (for read or intent,
 * not write). However, the six locks code does _not_ implement the actual
 * recursive checks itself - rather, if your code (e.g. btree iterator code)
 * knows that the current thread already has a lock held of the correct type,
 * six_lock_increment() may be used to bump up the counter for that type - the
 * only effect is that one more call to unlock will be required before the
 * lock is unlocked.
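 *
 * For example, a sketch (assuming the current thread already holds an intent
 * lock on foo->lock):
 *
 *	six_lock_increment(&foo->lock, SIX_LOCK_intent);
 *	...
 *	six_unlock_intent(&foo->lock);
 *	six_unlock_intent(&foo->lock);
 *
 * The first unlock drops the incremented count; the second actually releases
 * the lock.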
 */

#include <linux/lockdep.h>
#include <linux/sched.h>
#include <linux/types.h>

#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

#define SIX_LOCK_SEPARATE_LOCKFNS

union six_lock_state {
	struct {
		atomic64_t	counter;
	};

	struct {
		u64		v;
	};

	struct {
		/* for waitlist_bitnr() */
		unsigned long	l;
	};

	struct {
		unsigned	read_lock:27;
		unsigned	write_locking:1;
		unsigned	intent_lock:1;
		unsigned	waiters:3;
		/*
		 * seq works much like in seqlocks: it's incremented every time
		 * we lock and unlock for write.
		 *
		 * If it's odd, a write lock is held; if it's even, the lock is
		 * not held for write.
		 *
		 * Thus readers can unlock, and then lock again later iff it
		 * hasn't been modified in the meantime.
		 */
		u32		seq;
	};
};

enum six_lock_type {
	SIX_LOCK_read,
	SIX_LOCK_intent,
	SIX_LOCK_write,
};

struct six_lock {
	union six_lock_state	state;
	unsigned		intent_lock_recurse;
	struct task_struct	*owner;
#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
	struct optimistic_spin_queue osq;
#endif
	unsigned __percpu	*readers;

	raw_spinlock_t		wait_lock;
	struct list_head	wait_list[2];
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

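/*
 * Blocking lock operations take a six_lock_should_sleep_fn, called before the
 * calling thread sleeps on the lock; a nonzero return aborts the lock attempt
 * and is propagated back to the caller, which lets e.g. the btree iterator
 * code back off instead of deadlocking.
 */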
typedef int (*six_lock_should_sleep_fn)(struct six_lock *, void *);

static __always_inline void __six_lock_init(struct six_lock *lock,
					    const char *name,
					    struct lock_class_key *key)
{
	atomic64_set(&lock->state.counter, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list[SIX_LOCK_read]);
	INIT_LIST_HEAD(&lock->wait_list[SIX_LOCK_intent]);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
}

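/*
 * six_lock_init() is a macro so that every init site gets its own static
 * lock_class_key, and thus its own lockdep class. A sketch of typical usage
 * (foo is hypothetical):
 *
 *	six_lock_init(&foo->lock);
 */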
#define six_lock_init(lock)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__six_lock_init((lock), #lock, &__key);				\
} while (0)

#define __SIX_VAL(field, _v)	(((union six_lock_state) { .field = _v }).v)

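/*
 * __SIX_VAL() builds a full u64 state word with a single field set; a sketch
 * of how the implementation can apply it as an atomic delta (hypothetical
 * usage):
 *
 *	atomic64_add(__SIX_VAL(read_lock, 1), &lock->state.counter);
 */

/*
 * Declares the per-type operations: six_trylock_read(), six_relock_read(),
 * six_lock_read() and six_unlock_read(), and likewise for _intent and _write:
 */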
#define __SIX_LOCK(type)						\
bool six_trylock_##type(struct six_lock *);				\
bool six_relock_##type(struct six_lock *, u32);				\
int six_lock_##type(struct six_lock *, six_lock_should_sleep_fn, void *);\
void six_unlock_##type(struct six_lock *);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)
#undef __SIX_LOCK

#define SIX_LOCK_DISPATCH(type, fn, ...)				\
	switch (type) {							\
	case SIX_LOCK_read:						\
		return fn##_read(__VA_ARGS__);				\
	case SIX_LOCK_intent:						\
		return fn##_intent(__VA_ARGS__);			\
	case SIX_LOCK_write:						\
		return fn##_write(__VA_ARGS__);				\
	default:							\
		BUG();							\
	}

static inline bool six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
	SIX_LOCK_DISPATCH(type, six_trylock, lock);
}

static inline bool six_relock_type(struct six_lock *lock, enum six_lock_type type,
				   unsigned seq)
{
	SIX_LOCK_DISPATCH(type, six_relock, lock, seq);
}

static inline int six_lock_type(struct six_lock *lock, enum six_lock_type type,
				six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	SIX_LOCK_DISPATCH(type, six_lock, lock, should_sleep_fn, p);
}

static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	SIX_LOCK_DISPATCH(type, six_unlock, lock);
}

void six_lock_downgrade(struct six_lock *);
bool six_lock_tryupgrade(struct six_lock *);
bool six_trylock_convert(struct six_lock *, enum six_lock_type,
			 enum six_lock_type);

void six_lock_increment(struct six_lock *, enum six_lock_type);

void six_lock_wakeup_all(struct six_lock *);

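/*
 * For read-mostly locks, lock->readers can be switched to percpu counters so
 * that read lock/unlock don't contend on the state cacheline; these helpers
 * allocate and free that percpu state (the _rcu variant defers the free until
 * after an RCU grace period):
 */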
void six_lock_pcpu_free_rcu(struct six_lock *);
void six_lock_pcpu_free(struct six_lock *);
void six_lock_pcpu_alloc(struct six_lock *);

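/*
 * six_lock_counts() reports how many times the lock is currently held for
 * read and for intent (summing percpu reader counts where in use):
 */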
struct six_lock_count {
	unsigned read;
	unsigned intent;
};

struct six_lock_count six_lock_counts(struct six_lock *);

#endif /* _LINUX_SIX_H */