[SPARC64]: More fully work around Spitfire Errata 51.
[linux-2.6-block.git] include/asm-sparc64/spinlock.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

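/* For example (an illustrative .config fragment):
 *
 *	CONFIG_DEBUG_SPINLOCK=y
 */
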
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants (a 16-bit
 * displacement for the V9 branch-on-register forms versus 22 bits
 * for the pre-V9 forms).  The rule is that the branches that go
 * into and out of the spinner sections must be pre-V9 branches.
 */
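
/* The shape this gives each primitive (a sketch; the real code is in
 * the functions below):
 *
 *	1:	atomic attempt			(inline fast path)
 *		branch to 2f if busy
 *	.subsection 2				(out-of-line spinner)
 *	2:	reload and branch to 2b while busy
 *		branch back to 1b to retry
 *	.previous
 */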

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	rmb();			\
} while((lp)->lock)

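/* A minimal usage sketch (illustrative only, not part of this header),
 * assuming the generic spin_lock()/spin_unlock() wrappers from
 * <linux/spinlock.h> that map down to the _raw_* primitives below:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */
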
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"	/* atomically set the lock byte; old value in %0 */
"	membar		#StoreLoad | #StoreStore\n"	/* not in the delay slot (Spitfire Errata 51) */
"	brnz,pn		%0, 2f\n"	/* already held, spin out of line */
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"	/* watch with plain loads until it looks free */
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"	/* then retry the ldstub */
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
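
/* Roughly what the asm above does, as a C sketch.  ldstub_byte() is a
 * hypothetical helper standing in for the ldstub instruction, which
 * atomically stores 0xff to the byte and returns its old value:
 *
 *	while (ldstub_byte(&lock->lock) != 0) {
 *		while (lock->lock != 0)
 *			;		(spin with plain loads)
 *	}
 */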

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"	/* a single atomic attempt, no spinning */
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);	/* 1 if we took the lock, else 0 */
}
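
/* e.g. (illustrative, via the generic spin_trylock() wrapper):
 *
 *	if (spin_trylock(&my_lock)) {
 *		... we own the lock ...
 *		spin_unlock(&my_lock);
 *	}
 */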

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"	/* drain the critical section first */
"	stb		%%g0, [%0]"	/* then clear the lock byte */
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"	/* save the irq-disabled %pil */
"	wrpr		%3, %%pil\n"	/* spin at the caller's pre-save level */
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"	/* disable again before retrying */
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
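
/* In effect (a sketch; read_pil()/write_pil()/try_ldstub() are
 * hypothetical helpers): spin with the interrupt level dropped back to
 * what the caller had before local_irq_save(), and retake the lock
 * with interrupts disabled:
 *
 *	while (!try_ldstub(&lock->lock)) {
 *		saved = read_pil();	(the irq-disabled level)
 *		write_pil(flags);	(caller's pre-save level)
 *		while (lock->lock)
 *			;
 *		write_pil(saved);	(disable again, then retry)
 *	}
 */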

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	rmb(); \
} while((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);

#define _raw_spin_trylock(lp) \
	_do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
#define _raw_spin_lock(lock) \
	_do_spin_lock(lock, "spin_lock", \
		      (unsigned long) __builtin_return_address(0))
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks; these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"	/* signed load: negative means a writer holds it */
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"	/* bump the reader count if the word is unchanged */
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"	/* spin until the writer bit clears */
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
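
/* A C sketch of the loop above.  cas32() is a hypothetical helper with
 * the semantics of the "cas" instruction (compare-and-swap, returning
 * the old memory value):
 *
 *	for (;;) {
 *		s32 old = lock->lock;
 *		if (old < 0)
 *			continue;	(a writer holds bit 31)
 *		if (cas32(&lock->lock, old, old + 1) == old)
 *			break;		(reader count bumped)
 *	}
 */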

static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"	/* order the critical section before the drop */
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"	/* decrement the reader count */
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;	/* the writer bit */

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"	/* must be 0: no readers, no writer */
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"	/* claim the writer bit */
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"	/* spin until the word drains to zero */
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
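
/* Sketch (cas32() as above): a writer waits for the whole word to drain
 * to zero, then atomically claims the 0x80000000 writer bit:
 *
 *	for (;;) {
 *		u32 old = lock->lock;
 *		if (old != 0)
 *			continue;	(readers or a writer present)
 *		if (cas32(&lock->lock, 0, 0x80000000U) == 0)
 *			break;
 *	}
 */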

static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"	/* finish the critical section first */
"	stw		%%g0, [%0]"	/* clear the whole readers+writer word */
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"	/* busy, fail immediately */
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;	/* 1 on success, 0 if the lock was busy */
}
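
/* e.g. (illustrative, via the generic write_trylock() wrapper):
 *
 *	if (write_trylock(&my_rwlock)) {
 *		... exclusive access ...
 *		write_unlock(&my_rwlock);
 *	}
 */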

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);

#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock", \
		      (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock", \
			(unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock", \
		       (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock, \
			 (unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock", \
				(unsigned long) __builtin_return_address(0)); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)
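
/* The rwlock word encodes both roles: bit 31 is the writer bit and the
 * low bits count readers (illustrative values):
 *
 *	0x00000000	unlocked
 *	0x00000002	two readers
 *	0x80000000	write-locked
 */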

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */