[MIPS] SEAD defconfig build fix
[linux-2.6-block.git] / include / asm-mips / spinlock.h
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
1da177e4
LT
12#include <asm/war.h>
13
14/*
15 * Your basic SMP spinlocks, allowing only a single CPU anywhere
16 */
17
/* A nonzero lock word means the spinlock is currently held. */
beb3ca82 18#define __raw_spin_is_locked(x)       ((x)->lock != 0)
fb1c8f93
IM
/* MIPS takes no special action for IRQ flags; just acquire normally. */
 19#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
/* Busy-wait (politely, via cpu_relax) until the lock is released. */
 20#define __raw_spin_unlock_wait(x) \
beb3ca82 21	do { cpu_relax(); } while ((x)->lock)
1da177e4
LT
22
23/*
24 * Simple spin lock operations. There are two variants, one clears IRQ's
25 * on the local processor, one does not.
26 *
27 * We make no fairness assumptions. They have a cost.
28 */
29
/*
 * Acquire @lock: spin with an LL/SC sequence until lock->lock is
 * atomically changed from 0 to 1.  The trailing "sync" keeps the
 * critical section from being speculated/reordered before the
 * acquire.  When R10000_LLSC_WAR (from <asm/war.h>) is set, the
 * branch-likely form "beqzl" retries the store-conditional —
 * presumably a workaround for an R10000 LL/SC erratum; TODO confirm
 * against asm/war.h.
 */
fb1c8f93 30static inline void __raw_spin_lock(raw_spinlock_t *lock)
1da177e4
LT
31{
32	unsigned int tmp;
33
	/* Erratum-workaround variant: branch-likely (beqzl) retry loop. */
34	if (R10000_LLSC_WAR) {
35		__asm__ __volatile__(
fb1c8f93 36		"	.set noreorder	# __raw_spin_lock	\n"
1da177e4
LT
37		"1:	ll	%1, %2					\n"
38		"	bnez	%1, 1b					\n"
39		"	 li	%1, 1					\n"
40		"	sc	%1, %0					\n"
41		"	beqzl	%1, 1b					\n"
42		"	 nop						\n"
43		"	sync						\n"
44		"	.set	reorder					\n"
45		: "=m" (lock->lock), "=&r" (tmp)
46		: "m" (lock->lock)
47		: "memory");
	/* Normal variant: plain beqz retry if the store-conditional fails. */
48	} else {
49		__asm__ __volatile__(
fb1c8f93 50		"	.set noreorder	# __raw_spin_lock	\n"
1da177e4
LT
51		"1:	ll	%1, %2					\n"
52		"	bnez	%1, 1b					\n"
53		"	 li	%1, 1					\n"
54		"	sc	%1, %0					\n"
55		"	beqz	%1, 1b					\n"
56		"	 sync						\n"
57		"	.set	reorder					\n"
58		: "=m" (lock->lock), "=&r" (tmp)
59		: "m" (lock->lock)
60		: "memory");
61	}
62}
63
/*
 * Release @lock: the "sync" barrier orders all accesses in the
 * critical section before the plain store of zero that frees the
 * lock.  No LL/SC is needed — a single store suffices for release.
 */
fb1c8f93 64static inline void __raw_spin_unlock(raw_spinlock_t *lock)
1da177e4
LT
65{
66	__asm__ __volatile__(
fb1c8f93 67	"	.set noreorder	# __raw_spin_unlock	\n"
1da177e4
LT
68	"	sync						\n"
69	"	sw	$0, %0					\n"
70	"	.set\treorder					\n"
71	: "=m" (lock->lock)
72	: "m" (lock->lock)
73	: "memory");
74}
75
/*
 * Try to acquire @lock without spinning indefinitely.  The LL/SC
 * sequence sets bit 0 (ori ..., 1) and only retries when the
 * store-conditional itself fails; "res" ends up holding the *old*
 * low bit of the lock word, so the function returns nonzero iff the
 * lock was free and we took it.  The beqzl variant is the
 * R10000_LLSC_WAR erratum workaround, as in __raw_spin_lock.
 */
fb1c8f93 76static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
1da177e4
LT
77{
78	unsigned int temp, res;
79
80	if (R10000_LLSC_WAR) {
81		__asm__ __volatile__(
fb1c8f93 82		"	.set noreorder	# __raw_spin_trylock	\n"
1da177e4
LT
83		"1:	ll	%0, %3					\n"
84		"	ori	%2, %0, 1				\n"
85		"	sc	%2, %1					\n"
86		"	beqzl	%2, 1b					\n"
87		"	 nop						\n"
88		"	andi	%2, %0, 1				\n"
89		"	sync						\n"
90		"	.set reorder"
91		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
92		: "m" (lock->lock)
93		: "memory");
94	} else {
95		__asm__ __volatile__(
fb1c8f93 96		"	.set noreorder	# __raw_spin_trylock	\n"
1da177e4
LT
97		"1:	ll	%0, %3					\n"
98		"	ori	%2, %0, 1				\n"
99		"	sc	%2, %1					\n"
100		"	beqz	%2, 1b					\n"
101		"	 andi	%2, %0, 1				\n"
102		"	sync						\n"
103		"	.set reorder"
104		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
105		: "m" (lock->lock)
106		: "memory");
107	}
108
	/* res == old low bit: 0 means the lock was free, so we own it now. */
109	return res == 0;
110}
111
112/*
113 * Read-write spinlocks, allowing multiple readers but only one writer.
114 *
115 * NOTE! it is quite common to have readers in interrupts but no interrupt
116 * writers. For those circumstances we can "mix" irq-safe locks - any writer
117 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
118 * read-locks.
119 */
120
e3c48078
RB
121/*
 122 * read_can_lock - would read_trylock() succeed?
 123 * @lock: the rwlock in question.
 124 */
/* Non-negative lock word = no writer (writers set the sign bit). */
 125#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
 126
 127/*
 128 * write_can_lock - would write_trylock() succeed?
 129 * @lock: the rwlock in question.
 130 */
/* Zero lock word = no readers and no writer. */
 131#define __raw_write_can_lock(rw)	(!(rw)->lock)
132
/*
 * Acquire @rw for reading: spin while the lock word is negative
 * (sign bit set by a writer in __raw_write_lock), then atomically
 * increment the reader count with LL/SC.  The beqzl form is again
 * the R10000_LLSC_WAR erratum workaround; "sync" orders the reader
 * critical section after the acquire.
 */
fb1c8f93 133static inline void __raw_read_lock(raw_rwlock_t *rw)
1da177e4
LT
134{
135	unsigned int tmp;
136
137	if (R10000_LLSC_WAR) {
138		__asm__ __volatile__(
fb1c8f93 139		"	.set noreorder	# __raw_read_lock	\n"
1da177e4
LT
140		"1:	ll	%1, %2					\n"
141		"	bltz	%1, 1b					\n"
142		"	 addu	%1, 1					\n"
143		"	sc	%1, %0					\n"
144		"	beqzl	%1, 1b					\n"
145		"	 nop						\n"
146		"	sync						\n"
147		"	.set	reorder					\n"
148		: "=m" (rw->lock), "=&r" (tmp)
149		: "m" (rw->lock)
150		: "memory");
151	} else {
152		__asm__ __volatile__(
fb1c8f93 153		"	.set noreorder	# __raw_read_lock	\n"
1da177e4
LT
154		"1:	ll	%1, %2					\n"
155		"	bltz	%1, 1b					\n"
156		"	 addu	%1, 1					\n"
157		"	sc	%1, %0					\n"
158		"	beqz	%1, 1b					\n"
159		"	 sync						\n"
160		"	.set	reorder					\n"
161		: "=m" (rw->lock), "=&r" (tmp)
162		: "m" (rw->lock)
163		: "memory");
164	}
165}
166
167/* Note the use of sub, not subu which will make the kernel die with an
 168   overflow exception if we ever try to unlock an rwlock that is already
 169   unlocked or is being held by a writer. */
/*
 * Release a read hold on @rw by atomically decrementing the reader
 * count.  "sub" (trapping on signed overflow) is deliberate — see the
 * note above — so a bogus unlock fails loudly instead of corrupting
 * the lock word.
 */
fb1c8f93 170static inline void __raw_read_unlock(raw_rwlock_t *rw)
1da177e4
LT
171{
172	unsigned int tmp;
173
174	if (R10000_LLSC_WAR) {
175		__asm__ __volatile__(
fb1c8f93 176		"1:	ll	%1, %2		# __raw_read_unlock	\n"
1da177e4
LT
177		"	sub	%1, 1					\n"
178		"	sc	%1, %0					\n"
179		"	beqzl	%1, 1b					\n"
180		"	sync						\n"
181		: "=m" (rw->lock), "=&r" (tmp)
182		: "m" (rw->lock)
183		: "memory");
184	} else {
185		__asm__ __volatile__(
fb1c8f93 186		"	.set noreorder	# __raw_read_unlock	\n"
1da177e4
LT
187		"1:	ll	%1, %2					\n"
188		"	sub	%1, 1					\n"
189		"	sc	%1, %0					\n"
190		"	beqz	%1, 1b					\n"
191		"	 sync						\n"
192		"	.set	reorder					\n"
193		: "=m" (rw->lock), "=&r" (tmp)
194		: "m" (rw->lock)
195		: "memory");
196	}
197}
198
/*
 * Acquire @rw for writing: spin until the lock word is zero (no
 * readers, no writer), then atomically store 0x80000000 ("lui %1,
 * 0x8000") to claim exclusive ownership via the sign bit.  Readers
 * and other writers both spin while that bit is set.
 */
fb1c8f93 199static inline void __raw_write_lock(raw_rwlock_t *rw)
1da177e4
LT
200{
201	unsigned int tmp;
202
203	if (R10000_LLSC_WAR) {
204		__asm__ __volatile__(
fb1c8f93 205		"	.set noreorder	# __raw_write_lock	\n"
1da177e4
LT
206		"1:	ll	%1, %2					\n"
207		"	bnez	%1, 1b					\n"
208		"	 lui	%1, 0x8000				\n"
209		"	sc	%1, %0					\n"
210		"	beqzl	%1, 1b					\n"
b63014ad 211		"	 sync						\n"
1da177e4
LT
212		"	.set	reorder					\n"
213		: "=m" (rw->lock), "=&r" (tmp)
214		: "m" (rw->lock)
215		: "memory");
216	} else {
217		__asm__ __volatile__(
fb1c8f93 218		"	.set noreorder	# __raw_write_lock	\n"
1da177e4
LT
219		"1:	ll	%1, %2					\n"
220		"	bnez	%1, 1b					\n"
221		"	 lui	%1, 0x8000				\n"
222		"	sc	%1, %0					\n"
223		"	beqz	%1, 1b					\n"
b63014ad 224		"	 sync						\n"
1da177e4
LT
225		"	.set	reorder					\n"
226		: "=m" (rw->lock), "=&r" (tmp)
227		: "m" (rw->lock)
228		: "memory");
229	}
230}
231
/*
 * Release the write side of @rw: barrier first ("sync"), then clear
 * the whole lock word with a plain store — analogous to
 * __raw_spin_unlock.
 */
fb1c8f93 232static inline void __raw_write_unlock(raw_rwlock_t *rw)
1da177e4
LT
233{
234	__asm__ __volatile__(
fb1c8f93 235	"	sync			# __raw_write_unlock	\n"
1da177e4
LT
236	"	sw	$0, %0					\n"
237	: "=m" (rw->lock)
238	: "m" (rw->lock)
239	: "memory");
240}
241
/* No MIPS-specific fast path yet: use the generic read_trylock helper.
 * NOTE(review): presumably provided by <linux/spinlock.h> — confirm. */
fb1c8f93 242#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
1da177e4 243
/*
 * Try once to acquire @rw for writing.  ret starts at 0; if the lock
 * word is nonzero we branch straight to label 2 and report failure.
 * Otherwise we attempt to store the writer bit (0x80000000) with
 * LL/SC, retrying only on store-conditional failure, and set ret to 1
 * on success.  Returns 1 if the write lock was taken, 0 otherwise.
 */
fb1c8f93 244static inline int __raw_write_trylock(raw_rwlock_t *rw)
1da177e4
LT
245{
246	unsigned int tmp;
247	int ret;
248
249	if (R10000_LLSC_WAR) {
250		__asm__ __volatile__(
fb1c8f93 251		"	.set noreorder	# __raw_write_trylock	\n"
1da177e4
LT
252		"	li	%2, 0					\n"
253		"1:	ll	%1, %3					\n"
254		"	bnez	%1, 2f					\n"
255		"	 lui	%1, 0x8000				\n"
256		"	sc	%1, %0					\n"
257		"	beqzl	%1, 1b					\n"
b63014ad 258		"	 sync						\n"
1da177e4
LT
259		"	li	%2, 1					\n"
260		"	.set	reorder					\n"
261		"2:							\n"
262		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
263		: "m" (rw->lock)
264		: "memory");
265	} else {
266		__asm__ __volatile__(
fb1c8f93 267		"	.set noreorder	# __raw_write_trylock	\n"
1da177e4
LT
268		"	li	%2, 0					\n"
269		"1:	ll	%1, %3					\n"
270		"	bnez	%1, 2f					\n"
271		"	 lui	%1, 0x8000				\n"
272		"	sc	%1, %0					\n"
273		"	beqz	%1, 1b					\n"
274		"	 sync						\n"
275		"	li	%2, 1					\n"
276		"	.set	reorder					\n"
277		"2:							\n"
278		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
279		: "m" (rw->lock)
280		: "memory");
281	}
282
283	return ret;
284}
285
286#endif /* _ASM_SPINLOCK_H */