MIPS: Remove redundant instructions from arch_spin_{,try}lock.
[linux-2.6-block.git] / arch / mips / include / asm / spinlock.h
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
f65e4fa8 6 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
1da177e4
LT
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
2a31b033
RB
12#include <linux/compiler.h>
13
0004a9df 14#include <asm/barrier.h>
1da177e4
LT
15#include <asm/war.h>
16
17/*
18 * Your basic SMP spinlocks, allowing only a single CPU anywhere
2a31b033 19 *
70342287 20 * Simple spin lock operations. There are two variants, one clears IRQ's
2a31b033
RB
21 * on the local processor, one does not.
22 *
23 * These are fair FIFO ticket locks
24 *
25 * (the type definitions are in asm/spinlock_types.h)
1da177e4
LT
26 */
27
1da177e4
LT
28
29/*
2a31b033
RB
30 * Ticket locks are conceptually two parts, one indicating the current head of
31 * the queue, and the other indicating the current tail. The lock is acquired
32 * by atomically noting the tail and incrementing it by one (thus adding
33 * ourself to the queue and noting our position), then waiting until the head
34 becomes equal to the initial value of the tail.
1da177e4
LT
35 */
36
0199c4e6 37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
2a31b033 38{
500c2e1f 39 u32 counters = ACCESS_ONCE(lock->lock);
2a31b033 40
500c2e1f 41 return ((counters >> 16) ^ counters) & 0xffff;
2a31b033
RB
42}
43
/* IRQ flags play no part in acquisition on MIPS; just take the lock. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (politely, via cpu_relax()) until the lock is free. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2a31b033 47
0199c4e6 48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
2a31b033 49{
500c2e1f 50 u32 counters = ACCESS_ONCE(lock->lock);
2a31b033 51
500c2e1f 52 return (((counters >> 16) - counters) & 0xffff) > 1;
2a31b033 53}
0199c4e6 54#define arch_spin_is_contended arch_spin_is_contended
2a31b033 55
/*
 * arch_spin_lock - acquire the FIFO ticket spinlock.
 *
 * lock->lock packs two 16-bit counters: serving_now in the low halfword
 * and the next free ticket in the high halfword.  We atomically take a
 * ticket with ll/sc (adding 0x10000 increments the ticket halfword),
 * then spin until serving_now reaches our ticket.  The out-of-line
 * .subsection 2 code implements a proportional backoff: the delay loop
 * count scales with how far our ticket is from serving_now.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;	/* +1 in the ticket (high) halfword */

	if (R10000_LLSC_WAR) {
		/* R10000 ll/sc erratum workaround: retry the sc with the
		   branch-likely beqzl form. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		/*
		 * Same algorithm; the sc retry uses plain beqz and the srl
		 * usefully fills the branch delay slot.
		 * NOTE(review): the backoff mask here is 0x1fff whereas the
		 * R10000 path above uses 0xffff.  This only bounds the
		 * maximum backoff delay, but confirm the difference is
		 * intentional.
		 */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	/* Barrier after the ll/sc sequence so the critical section cannot
	   be observed before the lock is held. */
	smp_llsc_mb();
}
133
0199c4e6 134static inline void arch_spin_unlock(arch_spinlock_t *lock)
1da177e4 135{
500c2e1f
DD
136 unsigned int serving_now = lock->h.serving_now + 1;
137 wmb();
138 lock->h.serving_now = (u16)serving_now;
139 nudge_writes();
1da177e4
LT
140}
141
/*
 * arch_spin_trylock - attempt a single lock acquisition.
 *
 * Succeeds only if nobody is queued: when the ticket halfword already
 * equals serving_now we take a ticket with ll/sc and return 1; if they
 * differ (lock held or contended) we bail out via label 3 and return 0.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* +1 in the ticket (high) halfword */

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		/* Same sequence with a plain beqz sc retry. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	/* Barrier so a successful acquisition orders the critical section. */
	smp_llsc_mb();

	return tmp;	/* 1 on success, 0 if the lock was busy */
}
201
202/*
203 * Read-write spinlocks, allowing multiple readers but only one writer.
204 *
205 * NOTE! it is quite common to have readers in interrupts but no interrupt
206 * writers. For those circumstances we can "mix" irq-safe locks - any writer
207 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
208 * read-locks.
209 */
210
e3c48078
RB
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * The rwlock word holds the reader count; the sign bit is set by a
 * writer (see the lui 0x8000 in arch_write_lock), so a non-negative
 * value means readers may enter.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the lock word to be completely free: no readers and
 * no other writer.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
e3c48078 222
/*
 * arch_read_lock - acquire the rwlock for reading.
 *
 * Spin (bltz) while the lock word is negative (a writer holds it), then
 * bump the reader count with ll/sc, retrying until the sc succeeds.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* sc failure is retried by the C do/while loop instead. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	/* Barrier so reads in the critical section cannot move up. */
	smp_llsc_mb();
}
255
/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	/* Release barrier: critical-section accesses complete first. */
	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry. */
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* Drop our reader count; retry the sc from C on failure. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}
286
/*
 * arch_write_lock - acquire the rwlock for exclusive (write) access.
 *
 * Spin (bnez) until the lock word is zero (no readers, no writer), then
 * claim it by storing 0x80000000 (lui %1, 0x8000) with ll/sc.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* sc failure is retried by the C do/while loop instead. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	/* Barrier so the critical section cannot be observed early. */
	smp_llsc_mb();
}
319
/*
 * arch_write_unlock - release exclusive access.
 *
 * A plain store of zero suffices: the writer owns the whole word, so no
 * ll/sc sequence is needed.  smp_mb() first orders the critical section
 * before the releasing store.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
331
/*
 * arch_read_trylock - try once to take the rwlock for reading.
 *
 * Returns 1 on success, 0 if a writer holds the lock (word negative).
 * A failed sc is retried; only a held write lock aborts to label 2 with
 * ret still 0.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* Same sequence with a plain beqz sc retry. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
1da177e4 375
/*
 * arch_write_trylock - try once to take the rwlock for writing.
 *
 * Returns 1 on success, 0 if the word is non-zero (readers or a writer
 * present).  On success the word is set to 0x80000000 via lui.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely sc retry, with
		   the barrier emitted inside the asm sequence. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* sc failure retried from C; barrier follows the loop. */
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}
418
/* IRQ flags are ignored by the MIPS rwlock implementations. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Polite busy-wait hint used by generic lock-spinning loops. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
ef6edc97 425
1da177e4 426#endif /* _ASM_SPINLOCK_H */