MIPS: Alchemy: devboard PM needs to save CPLD registers.
[linux-2.6-block.git] / arch / mips / include / asm / spinlock.h
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
f65e4fa8 6 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
1da177e4
LT
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
2a31b033
RB
12#include <linux/compiler.h>
13
0004a9df 14#include <asm/barrier.h>
1da177e4
LT
15#include <asm/war.h>
16
17/*
18 * Your basic SMP spinlocks, allowing only a single CPU anywhere
2a31b033
RB
19 *
20 * Simple spin lock operations. There are two variants, one clears IRQ's
21 * on the local processor, one does not.
22 *
23 * These are fair FIFO ticket locks
24 *
25 * (the type definitions are in asm/spinlock_types.h)
1da177e4
LT
26 */
27
1da177e4
LT
28
29/*
2a31b033
RB
30 * Ticket locks are conceptually two parts, one indicating the current head of
31 * the queue, and the other indicating the current tail. The lock is acquired
32 * by atomically noting the tail and incrementing it by one (thus adding
33 * ourself to the queue and noting our position), then waiting until the head
34 * becomes equal to the initial value of the tail.
1da177e4
LT
35 */
36
0199c4e6 37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
2a31b033
RB
38{
39 unsigned int counters = ACCESS_ONCE(lock->lock);
40
41 return ((counters >> 14) ^ counters) & 0x1fff;
42}
43
/* IRQ flags are ignored on MIPS: the flags variant is a plain lock. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (with cpu_relax) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2a31b033 47
0199c4e6 48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
2a31b033
RB
49{
50 unsigned int counters = ACCESS_ONCE(lock->lock);
51
52 return (((counters >> 14) - counters) & 0x1fff) > 1;
53}
0199c4e6 54#define arch_spin_is_contended arch_spin_is_contended
2a31b033 55
/*
 * Acquire the ticket lock: atomically take the next ticket (addiu of
 * 0x4000 increments the ticket field at bit 14), then spin until the
 * now-serving counter (low 13 bits) equals our ticket.  The out-of-line
 * slow path (.subsection 2) delays proportionally to the queue distance
 * (distance << 5 loop iterations) before re-reading the lock word.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;

	if (R10000_LLSC_WAR) {
		/*
		 * Variant retrying the sc with branch-likely (beqzl);
		 * NOTE(review): presumably the R10000 ll/sc erratum form —
		 * confirm against asm/war.h.
		 */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	} else {
		/* Plain beqz form: a failed sc branches out of line to reload. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[my_ticket], %[ticket], 0x4000		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 3f			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"							\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lw	%[ticket], %[ticket_ptr]		\n"
		"	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket));
	}

	/* Acquire barrier after a successful ll/sc sequence. */
	smp_llsc_mb();
}
136
/*
 * Release the ticket lock: advance the now-serving counter by one.
 * The ori/xori 0x2000 pair clears bit 13, so a carry out of the
 * 13-bit serving field wraps to zero instead of spilling into the
 * ticket counter at bit 14.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	int tmp;

	/* Release barrier: order prior accesses before the ll/sc. */
	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"				# arch_spin_unlock	\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_unlock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	addiu	%[ticket], %[ticket], 1			\n"
		"	ori	%[ticket], %[ticket], 0x2000		\n"
		"	xori	%[ticket], %[ticket], 0x2000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 2f				\n"
		"	 nop						\n"
		"							\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp));
	}
}
176
/*
 * Try to acquire the ticket lock without spinning.
 * Succeeds (returns 1) only when the next-ticket counter already
 * equals the now-serving counter, i.e. the queue is empty; otherwise
 * returns 0 without modifying the lock word.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"	ll	%[ticket], %[ticket_ptr]		\n"
		"1:	srl	%[my_ticket], %[ticket], 14		\n"
		"	andi	%[my_ticket], %[my_ticket], 0x1fff	\n"
		"	andi	%[now_serving], %[ticket], 0x1fff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addiu	%[ticket], %[ticket], 0x4000		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 4f				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"4:	b	1b					\n"
		"	 ll	%[ticket], %[ticket_ptr]		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3));
	}

	/* Acquire barrier on the (possibly failed) attempt. */
	smp_llsc_mb();

	return tmp;	/* 1 = lock taken, 0 = busy */
}
237
238/*
239 * Read-write spinlocks, allowing multiple readers but only one writer.
240 *
241 * NOTE! it is quite common to have readers in interrupts but no interrupt
242 * writers. For those circumstances we can "mix" irq-safe locks - any writer
243 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
244 * read-locks.
245 */
246
e3c48078
RB
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Non-negative means no writer holds the lock (a writer sets the sign
 * bit via lui 0x8000 in arch_write_lock).
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Zero means no readers and no writer are present.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
/*
 * Take a read lock: spin while the sign bit (writer) is set, then
 * atomically bump the reader count.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* Spin out of line (.subsection 2) while a writer is present. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	/* Acquire barrier after the successful ll/sc. */
	smp_llsc_mb();
}
300
/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	/* Release barrier before dropping the reader count. */
	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
337
/*
 * Take the write lock: spin until the lock word is zero (no readers,
 * no writer), then atomically store 0x80000000 (lui 0x8000) to claim
 * exclusive ownership via the sign bit.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* Spin out of line while the lock word is non-zero. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	/* Acquire barrier after the successful ll/sc. */
	smp_llsc_mb();
}
379
/*
 * Release the write lock: a plain store of zero suffices since the
 * writer owns the word exclusively; smp_mb() orders the critical
 * section before the release.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
391
/*
 * Try to take a read lock: fail immediately (return 0) if the writer
 * bit (sign bit) is set, otherwise bump the reader count and return 1.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;	/* 1 = read lock taken, 0 = writer present */
}
1da177e4 435
/*
 * Try to take the write lock: fail (return 0) if the lock word is
 * non-zero (readers or a writer present); otherwise store the writer
 * bit (lui 0x8000) and return 1.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		/* Failed sc retries out of line (.subsection 2) at label 3. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;	/* 1 = write lock taken, 0 = busy */
}
482
/* IRQ flags are ignored on MIPS: the flags variants are plain locks. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Relax while spinning at the generic-lock layer. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
ef6edc97 489
1da177e4 490#endif /* _ASM_SPINLOCK_H */