/*
 * ARM semaphore implementation, taken from the
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Modified for ARM by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the
 * "sleepers" variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>,
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
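
/*
 * For reference, a minimal C sketch of the uncontended fast paths
 * that pair with the slow paths below.  The real fast paths live in
 * <asm/semaphore.h> and are written in assembly; the bodies here are
 * only illustrative, not the actual implementation.  down() enters
 * the slow path when its decrement takes the count negative; up()
 * only needs to wake somebody when the count was negative before the
 * increment:
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down_failed(sem);
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up_wakeup(sem);
 *	}
 */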

void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static DEFINE_SPINLOCK(semaphore_lock);

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
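
/*
 * A worked example of the bookkeeping above (illustrative): suppose
 * the semaphore is held (count == 0) and task A calls down().  A's
 * fast-path decrement leaves count == -1; in here A bumps sleepers
 * to 1, adds sleepers - 1 == 0 to count, still sees -1, so it sets
 * sleepers back to 1 and sleeps.  Task B then calls down(): count
 * becomes -2, sleepers becomes 2, B adds 2 - 1 == 1 giving count ==
 * -1, still negative, so B too sets sleepers = 1 and sleeps.  Net
 * effect: count == -1 and sleepers == 1 - the waiters collectively
 * account for a single "-1" in count, however many of them there
 * are.  When the holder's up() raises count to 0 and wakes one
 * waiter, that waiter adds sleepers - 1 == 0, sees a non-negative
 * count, zeroes sleepers, and owns the semaphore.
 */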

int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
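
/*
 * Typical use of the interruptible variant from driver code (an
 * illustrative sketch; "dev->sem" is a hypothetical field, not part
 * of this file):
 *
 *	if (down_interruptible(&dev->sem))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	up(&dev->sem);
 *
 * A pending signal makes __down_interruptible() return -EINTR, which
 * the inline wrapper hands back to the caller as a non-zero result.
 */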

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
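
/*
 * The single-"cmpxchg" trylock mentioned above would look roughly
 * like this, assuming an atomic_cmpxchg() primitive (an illustrative
 * sketch only; it is ruled out because this code is shared with the
 * i386 version, and the 386 has no cmpxchg instruction).  It returns
 * 1 on failure and 0 on success, matching down_trylock():
 *
 *	int down_trylock(struct semaphore *sem)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(&sem->count);
 *			if (old <= 0)
 *				return 1;
 *		} while (atomic_cmpxchg(&sem->count, old, old - 1) != old);
 *		return 0;
 *	}
 */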

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases. To remain AAPCS compliant (64-bit stack
 * alignment) we save r4 as well.
 */
asm("	.section .sched.text,\"ax\",%progbits	\n\
	.align	5				\n\
	.globl	__down_failed			\n\
__down_failed:					\n\
	stmfd	sp!, {r0 - r4, lr}		\n\
	mov	r0, ip				\n\
	bl	__down				\n\
	ldmfd	sp!, {r0 - r4, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__down_interruptible_failed	\n\
__down_interruptible_failed:			\n\
	stmfd	sp!, {r0 - r4, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_interruptible		\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r4, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__down_trylock_failed		\n\
__down_trylock_failed:				\n\
	stmfd	sp!, {r0 - r4, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_trylock			\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r4, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__up_wakeup			\n\
__up_wakeup:					\n\
	stmfd	sp!, {r0 - r4, lr}		\n\
	mov	r0, ip				\n\
	bl	__up				\n\
	ldmfd	sp!, {r0 - r4, pc}		\n\
");

EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);