Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 LT |
2 | /* rwsem-spinlock.c: R/W semaphores: contention handling functions for |
3 | * generic spinlock implementation | |
4 | * | |
5 | * Copyright (c) 2001 David Howells (dhowells@redhat.com). | |
6 | * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> | |
7 | * - Derived also from comments by Linus | |
8 | */ | |
9 | #include <linux/rwsem.h> | |
174cd4b1 | 10 | #include <linux/sched/signal.h> |
b17b0153 | 11 | #include <linux/sched/debug.h> |
8bc3bcc9 | 12 | #include <linux/export.h> |
1da177e4 | 13 | |
/* What a queued waiter is blocked waiting for. */
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};
18 | ||
/*
 * A waiter queued on sem->wait_list; lives on the waiting task's stack
 * for the duration of the sleep.
 */
struct rwsem_waiter {
	struct list_head list;		/* link in sem->wait_list */
	struct task_struct *task;	/* sleeping task; set to NULL by the
					 * waker when a read lock is granted */
	enum rwsem_waiter_type type;	/* read or write waiter */
};
24 | ||
29671f22 AW |
25 | int rwsem_is_locked(struct rw_semaphore *sem) |
26 | { | |
27 | int ret = 1; | |
28 | unsigned long flags; | |
29 | ||
ddb6c9b5 | 30 | if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
13b9a962 | 31 | ret = (sem->count != 0); |
ddb6c9b5 | 32 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
29671f22 AW |
33 | } |
34 | return ret; | |
35 | } | |
36 | EXPORT_SYMBOL(rwsem_is_locked); | |
37 | ||
1da177e4 LT |
38 | /* |
39 | * initialise the semaphore | |
40 | */ | |
4ea2176d IM |
41 | void __init_rwsem(struct rw_semaphore *sem, const char *name, |
42 | struct lock_class_key *key) | |
1da177e4 | 43 | { |
4ea2176d IM |
44 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
45 | /* | |
46 | * Make sure we are not reinitializing a held semaphore: | |
47 | */ | |
48 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); | |
4dfbb9d8 | 49 | lockdep_init_map(&sem->dep_map, name, key, 0); |
4ea2176d | 50 | #endif |
13b9a962 | 51 | sem->count = 0; |
ddb6c9b5 | 52 | raw_spin_lock_init(&sem->wait_lock); |
1da177e4 | 53 | INIT_LIST_HEAD(&sem->wait_list); |
1da177e4 | 54 | } |
118d52da | 55 | EXPORT_SYMBOL(__init_rwsem); |
1da177e4 LT |
56 | |
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	/* Only the waiter(s) at the front of the queue can be granted. */
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite)
			/* Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs. */
			wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Make sure we do not wakeup the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * "if (!waiter.task)" check in __down_read_common().
		 */
		smp_mb();
		waiter->task = NULL;	/* this is what grants the lock */
		wake_up_process(tsk);
		put_task_struct(tsk);	/* drop the ref taken at enqueue */
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	/* account for all the read locks just granted */
	sem->count += woken;

out:
	return sem;
}
112 | ||
113 | /* | |
114 | * wake a single writer | |
115 | */ | |
116 | static inline struct rw_semaphore * | |
117 | __rwsem_wake_one_writer(struct rw_semaphore *sem) | |
118 | { | |
119 | struct rwsem_waiter *waiter; | |
1da177e4 LT |
120 | |
121 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | |
41ef8f82 | 122 | wake_up_process(waiter->task); |
1da177e4 | 123 | |
1da177e4 LT |
124 | return sem; |
125 | } | |
126 | ||
127 | /* | |
128 | * get a read lock on the semaphore | |
129 | */ | |
0aa1125f | 130 | int __sched __down_read_common(struct rw_semaphore *sem, int state) |
1da177e4 LT |
131 | { |
132 | struct rwsem_waiter waiter; | |
3eac4aba | 133 | unsigned long flags; |
1da177e4 | 134 | |
ddb6c9b5 | 135 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 136 | |
13b9a962 | 137 | if (sem->count >= 0 && list_empty(&sem->wait_list)) { |
1da177e4 | 138 | /* granted */ |
13b9a962 | 139 | sem->count++; |
ddb6c9b5 | 140 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 LT |
141 | goto out; |
142 | } | |
143 | ||
1da177e4 | 144 | /* set up my own style of waitqueue */ |
d269a8b8 | 145 | waiter.task = current; |
e2d57f78 | 146 | waiter.type = RWSEM_WAITING_FOR_READ; |
d269a8b8 | 147 | get_task_struct(current); |
1da177e4 LT |
148 | |
149 | list_add_tail(&waiter.list, &sem->wait_list); | |
150 | ||
1da177e4 LT |
151 | /* wait to be given the lock */ |
152 | for (;;) { | |
153 | if (!waiter.task) | |
154 | break; | |
0aa1125f KT |
155 | if (signal_pending_state(state, current)) |
156 | goto out_nolock; | |
157 | set_current_state(state); | |
158 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | |
1da177e4 | 159 | schedule(); |
0aa1125f | 160 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 LT |
161 | } |
162 | ||
0aa1125f | 163 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 | 164 | out: |
0aa1125f KT |
165 | return 0; |
166 | ||
167 | out_nolock: | |
168 | /* | |
169 | * We didn't take the lock, so that there is a writer, which | |
170 | * is owner or the first waiter of the sem. If it's a waiter, | |
171 | * it will be woken by current owner. Not need to wake anybody. | |
172 | */ | |
173 | list_del(&waiter.list); | |
174 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | |
175 | return -EINTR; | |
176 | } | |
177 | ||
/* Take the lock for reading; sleeps uninterruptibly until granted. */
void __sched __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}
182 | ||
/*
 * Take the lock for reading; the sleep may be interrupted by a fatal
 * signal, in which case -EINTR is returned.  Returns 0 on success.
 */
int __sched __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}
187 | ||
188 | /* | |
189 | * trylock for reading -- returns 1 if successful, 0 if contention | |
190 | */ | |
9f741cb8 | 191 | int __down_read_trylock(struct rw_semaphore *sem) |
1da177e4 LT |
192 | { |
193 | unsigned long flags; | |
194 | int ret = 0; | |
195 | ||
1da177e4 | 196 | |
ddb6c9b5 | 197 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 198 | |
13b9a962 | 199 | if (sem->count >= 0 && list_empty(&sem->wait_list)) { |
1da177e4 | 200 | /* granted */ |
13b9a962 | 201 | sem->count++; |
1da177e4 LT |
202 | ret = 1; |
203 | } | |
204 | ||
ddb6c9b5 | 205 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 | 206 | |
1da177e4 LT |
207 | return ret; |
208 | } | |
209 | ||
/*
 * get a write lock on the semaphore
 *
 * Queues the caller and sleeps in @state until the active count drops to
 * zero, then claims the lock exclusively (count = -1).  Returns 0 on
 * success, or -EINTR if @state admits signals and one arrives.
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	unsigned long flags;
	int ret = 0;	/* always 0 on the success path */

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * That is the key to support write lock stealing: allows the
		 * task already on CPU to get the lock soon rather than put
		 * itself into sleep and waiting for system woke it or someone
		 * else in the head of the wait list up.
		 */
		if (sem->count == 0)
			break;
		if (signal_pending_state(state, current))
			goto out_nolock;

		set_current_state(state);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock */
	sem->count = -1;
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;

out_nolock:
	list_del(&waiter.list);
	/*
	 * Our queued write waiter may have been the only thing keeping
	 * waiting readers asleep; if no writer owns the lock, wake them
	 * now (wakewrite == 0: other writers are not woken here).
	 */
	if (!list_empty(&sem->wait_list) && sem->count >= 0)
		__rwsem_do_wake(sem, 0);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return -EINTR;
}
260 | ||
/* Take the lock for writing; sleeps uninterruptibly until granted. */
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
265 | ||
/*
 * Take the lock for writing; the sleep may be interrupted by a fatal
 * signal, in which case -EINTR is returned.  Returns 0 on success.
 */
int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}
270 | ||
271 | /* | |
272 | * trylock for writing -- returns 1 if successful, 0 if contention | |
273 | */ | |
9f741cb8 | 274 | int __down_write_trylock(struct rw_semaphore *sem) |
1da177e4 LT |
275 | { |
276 | unsigned long flags; | |
277 | int ret = 0; | |
278 | ||
ddb6c9b5 | 279 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 280 | |
13b9a962 | 281 | if (sem->count == 0) { |
41ef8f82 | 282 | /* got the lock */ |
13b9a962 | 283 | sem->count = -1; |
1da177e4 LT |
284 | ret = 1; |
285 | } | |
286 | ||
ddb6c9b5 | 287 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 | 288 | |
1da177e4 LT |
289 | return ret; |
290 | } | |
291 | ||
292 | /* | |
293 | * release a read lock on the semaphore | |
294 | */ | |
9f741cb8 | 295 | void __up_read(struct rw_semaphore *sem) |
1da177e4 LT |
296 | { |
297 | unsigned long flags; | |
298 | ||
ddb6c9b5 | 299 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 300 | |
13b9a962 | 301 | if (--sem->count == 0 && !list_empty(&sem->wait_list)) |
1da177e4 LT |
302 | sem = __rwsem_wake_one_writer(sem); |
303 | ||
ddb6c9b5 | 304 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 LT |
305 | } |
306 | ||
307 | /* | |
308 | * release a write lock on the semaphore | |
309 | */ | |
9f741cb8 | 310 | void __up_write(struct rw_semaphore *sem) |
1da177e4 LT |
311 | { |
312 | unsigned long flags; | |
313 | ||
ddb6c9b5 | 314 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 315 | |
13b9a962 | 316 | sem->count = 0; |
1da177e4 LT |
317 | if (!list_empty(&sem->wait_list)) |
318 | sem = __rwsem_do_wake(sem, 1); | |
319 | ||
ddb6c9b5 | 320 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 LT |
321 | } |
322 | ||
323 | /* | |
324 | * downgrade a write lock into a read lock | |
325 | * - just wake up any readers at the front of the queue | |
326 | */ | |
9f741cb8 | 327 | void __downgrade_write(struct rw_semaphore *sem) |
1da177e4 LT |
328 | { |
329 | unsigned long flags; | |
330 | ||
ddb6c9b5 | 331 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4 | 332 | |
13b9a962 | 333 | sem->count = 1; |
1da177e4 LT |
334 | if (!list_empty(&sem->wait_list)) |
335 | sem = __rwsem_do_wake(sem, 0); | |
336 | ||
ddb6c9b5 | 337 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4 LT |
338 | } |
339 |