/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>


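/*
 * Thin wrappers around lockdep's lock_acquire()/lock_release().  The two
 * CONFIG_PROVE_LOCKING branches below differ only in the 5th argument to
 * __acq(), lockdep's 'check' level: 2 (full lock validation) when
 * PROVE_LOCKING is enabled, 1 (basic checks) otherwise.
 */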
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# ifdef CONFIG_PROVE_LOCKING
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 2, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 2, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 2, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# else
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# endif
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
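
/*
 * Worked examples of the count encoding (derived from the definitions
 * above; the 64-bit layout is shown):
 *
 *	0x0000000000000000	unlocked, no waiters
 *	0x0000000000000003	three active readers, no waiters
 *	0xffffffff00000000	no active holders, one blocked waiter
 *	0xffffffff00000001	one active holder plus one wait bias, i.e.
 *				either a writer holding the lock
 *				(LDSEM_WRITE_BIAS) or a reader holding it
 *				while one waiter blocks
 *
 * So count > 0 means reader-owned with no waiters, a clear active mask
 * means no current holders, and a negative count means a writer owns the
 * lock and/or waiters are queued.
 */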

struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
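
/*
 * Throughout this file, ldsem_cmpxchg() is used in open-coded retry loops
 * of the following shape (a sketch of the pattern, not extra API):
 *
 *	long count = ldsem_atomic_update(BIAS, sem);
 *	do {
 *		if (count is already acceptable)
 *			break;
 *		if (ldsem_cmpxchg(&count, count - BIAS, sem))
 *			return;
 *	} while (1);
 *
 * i.e. the count is updated speculatively first, then rolled back with
 * ldsem_cmpxchg() if the observed value shows the attempt cannot succeed.
 */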

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
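
/*
 * Minimal usage sketch (a hypothetical caller; the tty core embeds one of
 * these semaphores as tty->ldisc_sem and initializes it with the
 * init_ldsem() wrapper, which supplies the lock class key):
 *
 *	struct ld_semaphore sem;
 *
 *	init_ldsem(&sem);
 *	if (ldsem_down_read(&sem, MAX_SCHEDULE_TIMEOUT)) {
 *		... read-side critical section ...
 *		ldsem_up_read(&sem);
 *	}
 */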

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
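		/* Clearing ->task below is the "lock granted" signal that
		 * the waiter polls without taking wait_lock.  The barrier
		 * ensures our reads of the waiter (the list linkage and the
		 * task pointer) complete before that store: once the waiter
		 * sees NULL it may return, and its on-stack ldsem_waiter
		 * can vanish.
		 */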
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}
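
/*
 * Note that this 0 -> 1 transition is also where write lock stealing
 * bites: a concurrent ldsem_down_write*() fast path can win the active
 * count ahead of a writer that has been queued here for longer.
 */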

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * Handle the lock being released when processes blocked on it can now run.
 * - If we come here from ldsem_up_xxxx(), then:
 *   - the 'active part' of count (&LDSEM_ACTIVE_MASK) reached 0
 *     (but may have changed since)
 *   - the 'waiting part' of count (the upper half) is negative
 *     (and will remain so)
 * - The wait_lock must be held by the caller.
 * - Write waiters have priority: a queued writer is woken before any
 *   waiting readers are considered.
 * - Woken readers are removed from the wait list after having ->task
 *   zeroed; a woken writer dequeues itself in down_write_failed().
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters
	 * (count > 0 means the speculative read lock was in fact granted)
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			sem->wait_readers--;	/* undo the wait accounting too */
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

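	/*
	 * Unlike blocked readers, a blocked writer is not handed the lock
	 * by the waker: __ldsem_wake_writer() merely wakes it, and it must
	 * transition the active count 0 -> 1 itself via writer_trylock(),
	 * racing with any lock stealers.
	 */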
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that gave way to this
	 * writer; with no other writer queued, nobody else would ever wake
	 * them even once the lock is free.
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}

static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
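	/* Wake waiters only if we were the last active holder (active part
	 * now 0) while the wait half is negative, i.e. someone is queued.
	 */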
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
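	/* Dropping LDSEM_WRITE_BIAS zeroes the active part (we were the
	 * sole holder); a negative result means waiters remain queued.
	 */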
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif