/*
 * i386 and x86-64 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such contended acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up()
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
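
/*
 * For reference, the fast paths in <asm/semaphore.h> that hand off to
 * this file behave conceptually like the following simplified C
 * sketch.  This is an illustration, not the real code: the actual
 * fast paths are hand-written inline assembly, and the names
 * down_sketch()/up_sketch() are made up for this comment.
 */
#if 0	/* illustrative sketch only, never compiled */
static inline void down_sketch(struct semaphore *sem)
{
	/* Uncontended case: the decremented count stays non-negative. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);	/* contended: sleep in the slow path */
}

static inline void up_sketch(struct semaphore *sem)
{
	/* A result <= 0 means somebody is waiting in the slow path. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);	/* contended: wake a sleeper */
}
#endif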

/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
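
/*
 * Concretely (hypothetical numbers): an uncontended down() takes the
 * count from 1 to 0, crossing no boundary, so nothing extra happens.
 * A contended down() takes it from 0 to -1, crossing the
 * non-negative -> negative boundary, so the caller puts itself on
 * the wakeup list before synchronizing with "sleepers".  The
 * matching up() takes the count from -1 back to 0, crossing the
 * negative -> non-negative boundary, and wakes a sleeper.
 */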

fastcall void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

fastcall void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
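
/*
 * Worked example of the "sleepers" bookkeeping above (hypothetical
 * numbers): with the owner holding the semaphore at count == 0,
 * tasks B and C both enter __down(), driving count to -2 and
 * sleepers to 2.  C's pass through the loop adds sleepers - 1 == 1
 * back into count, folding B's contribution out of it: count becomes
 * -1, still negative, so C sets sleepers back to 1 and sleeps.  A
 * contended semaphore thus settles at count == -1 and sleepers == 1
 * no matter how many tasks wait; the wait queue, not the counter,
 * remembers who they are.
 */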

fastcall int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
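
/*
 * Hypothetical caller sketch (not part of this file; example_wait()
 * is a made-up name): the usual pattern is to translate the -EINTR
 * from down_interruptible() into -ERESTARTSYS so the signal code can
 * restart the system call.
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_wait(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -ERESTARTSYS;	/* interrupted by a signal */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif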

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.  We always
 * return 1 here: the inline fast path in
 * <asm/semaphore.h> has already handled the
 * success case, so all we do is repair the
 * count and wake anyone the repair uncovers.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
fastcall int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
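
/*
 * Hypothetical caller sketch (not part of this file; example_poll()
 * is a made-up name): the public down_trylock() returns 0 when the
 * fast path got the semaphore, and non-zero (the 1 from
 * __down_trylock() above) when it did not.
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_poll(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;		/* held by somebody else */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif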