fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

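/*
 * Locking model used below: every extent_buffer carries a rwlock
 * (eb->lock) plus counters.  A holder is either "spinning" (still
 * holding eb->lock, counted in spinning_readers/spinning_writers) or
 * "blocking" (eb->lock released, counted in blocking_readers/
 * blocking_writers, allowed to sleep).  read_locks/write_locks count
 * holders of either kind, lock_owner records the pid of the write
 * holder, and the two wait queues let new lockers sleep until the
 * blocking holders are gone.
 */
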
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
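
/*
 * Typical use of the two helpers above (a sketch, not a call sequence
 * taken from this file; example_sleeping_work() is a hypothetical
 * stand-in for any operation that may sleep):
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	example_sleeping_work(eb);
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 *
 * The lock starts out spinning, goes blocking (eb->lock released, the
 * blocking counter bumped) around the sleeping work, and is converted
 * back to a spinning lock before the final unlock.
 */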

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent buffer is already write-locked by our thread.
		 * We allow an additional read lock to be added because it's
		 * for the same thread. btrfs_find_all_roots() depends on this
		 * as it may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
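
/*
 * Sketch of the nested case handled above: a thread that already holds
 * the write lock in blocking mode may take a read lock on the same
 * buffer without deadlocking, which btrfs_find_all_roots() relies on:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);	sets eb->lock_nested
 *	btrfs_tree_read_unlock(eb);	clears eb->lock_nested
 */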

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
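
/*
 * The two try-lock variants above support opportunistic callers that
 * must not sleep.  A sketch (example_defer() is a hypothetical
 * fallback):
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... read the buffer ...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		example_defer(eb);
 *	}
 */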

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
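
/*
 * The write side follows the same shape: try the cheap path first and
 * fall back to the full, possibly sleeping btrfs_tree_lock().  A
 * sketch:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 *	... modify the buffer ...
 *	btrfs_tree_unlock(eb);
 */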

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb__after_atomic();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
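
/*
 * Note that btrfs_tree_unlock() handles both flavors, so a writer that
 * went blocking does not need to convert back before unlocking.  A
 * sketch:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */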

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}