locking/spinlock, arch: Update and fix spin_unlock_wait() implementations

arch/ia64/include/asm/spinlock.h (linux-2.6-block.git)
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

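/*
 * For illustration only (these helpers are hypothetical and not used
 * anywhere in the kernel): how the two fields are decoded from the single
 * lock word, and the "lock is free" test the functions below rely on.
 */
static inline int __ticket_example_next(int word)
{
	return word & TICKET_MASK;			/* tail: bits 14..0 */
}

static inline int __ticket_example_serving(int word)
{
	return (word >> TICKET_SHIFT) & TICKET_MASK;	/* head: bits 31..17 */
}

/*
 * head == tail, i.e. the lock is free, exactly when
 * (((word >> TICKET_SHIFT) ^ word) & TICKET_MASK) == 0.
 */
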
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	ticket = ia64_fetchadd(1, p, acq);

	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

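	/*
	 * Spin using a check-load: ia64_invala() above flushed the ALAT, so
	 * the first ld4.c.nc reloads the lock word and installs a fresh ALAT
	 * entry; while that entry survives, later iterations need not touch
	 * memory, and a remote store to the lock word evicts the entry and
	 * forces a real reload.  (Our reading of the ld.c/ALAT idiom; see
	 * the Itanium architecture manuals.)
	 */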
	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

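/*
 * Trylock never queues: it only takes a ticket when the lock word shows
 * head == tail (nobody holds or waits for the lock), and the cmpxchg
 * fails if the word changed in the meantime.
 */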
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->lock);

	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

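	/*
	 * p addresses the upper halfword of the lock word: bit 0 of *p is
	 * the top pad bit, bits 1..15 are now_serving.  Adding 2 therefore
	 * increments now_serving, and "& ~1" keeps the pad bit clear.  The
	 * .bias completer is a hint requesting the cache line in an
	 * exclusive state ahead of the store below (our reading of the
	 * Itanium load-hint semantics).
	 */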
	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket;

	ia64_invala();

	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			break;
		cpu_relax();
	}

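	/*
	 * The loop above orders later accesses only by a control
	 * dependency; upgrade that to ACQUIRE so the caller's subsequent
	 * loads and stores cannot be hoisted before the observed unlock.
	 */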
	smp_acquire__after_ctrl_dep();
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

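	/*
	 * next_ticket - now_serving counts the owner plus any waiters, so
	 * a value greater than 1 means someone is queued behind the
	 * current holder.
	 */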
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_spin_unlock_wait(lock);
}

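/*
 * Sketch of how these entry points are normally exercised (via the
 * generic spin_lock()/spin_unlock() wrappers; nothing calls the arch_*
 * functions directly like this, and the initializer is illustrative):
 *
 *	arch_spinlock_t l = { 0 };
 *
 *	arch_spin_lock(&l);
 *	// ... critical section ...
 *	arch_spin_unlock(&l);
 */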
#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)

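/*
 * rwlock word layout (see arch_rwlock_t): read_counter lives in the low
 * 31 bits and write_lock in the sign bit, so a negative word means a
 * writer holds the lock and zero means it is completely free, which is
 * exactly what the two tests above check.
 */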
#ifdef ASM_SUPPORTED

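/*
 * Rough shape of the asm below: fetchadd the reader count up; if the
 * result is negative a writer holds the lock, so undo the increment,
 * re-enable interrupts iff they were enabled in 'flags' (p6 tests
 * IA64_PSR_I_BIT), spin until the word turns non-negative, then mask
 * interrupts again and retry.
 */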
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)								\
do {											\
	arch_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

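/*
 * Same structure as the read-lock asm above: r29 is set to 1 << 31 (the
 * write_lock bit) with a dep instruction, and cmpxchg4.acq tries to swing
 * the whole word from 0 to that value; on failure we spin -- with
 * interrupts re-enabled when 'flags' allows -- until the word reads zero,
 * then retry.
 */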
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

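/*
 * Unlock by clearing just the byte that holds the write_lock bit: on
 * little-endian ia64, bit 31 of the word lives in byte 3, so an st1.rel
 * of zero there releases the lock without disturbing read_counter.
 */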
static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

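/*
 * Read-trylock takes a snapshot of the lock, builds an expected value
 * with the writer bit clear and a new value with the reader count
 * incremented, and publishes the latter with a single cmpxchg; it fails
 * if a writer holds the lock or the word changed under us.
 */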
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */