/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */
10 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
15 #include <linux/compiler.h>
/*
 * The 64-bit count packs two fields: the low 32 bits count active
 * lockers (each reader or writer adds RWSEM_ACTIVE_BIAS), while the
 * high 32 bits go negative (RWSEM_WAITING_BIAS) whenever tasks are
 * waiting.  A writer therefore adds both biases at once, so count is
 * exactly RWSEM_ACTIVE_WRITE_BIAS when one writer holds the lock with
 * no other activity.
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
24 static inline void __down_read(struct rw_semaphore *sem)
28 oldcount = sem->count;
29 sem->count += RWSEM_ACTIVE_READ_BIAS;
41 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
42 :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
44 if (unlikely(oldcount < 0))
45 rwsem_down_read_failed(sem);
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
51 static inline int __down_read_trylock(struct rw_semaphore *sem)
57 new = res + RWSEM_ACTIVE_READ_BIAS;
61 res = cmpxchg(&sem->count, old, new);
63 return res >= 0 ? 1 : 0;
66 static inline long ___down_write(struct rw_semaphore *sem)
70 oldcount = sem->count;
71 sem->count += RWSEM_ACTIVE_WRITE_BIAS;
83 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
84 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
/*
 * Lock for writing (uninterruptible): nonzero old count from the
 * fastpath means contention, so sleep in the generic slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	if (unlikely(___down_write(sem)))
		rwsem_down_write_failed(sem);
}
/*
 * Lock for writing, but allow the sleep to be interrupted by a fatal
 * signal.  Returns 0 on success, -EINTR if killed while waiting.
 */
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	if (unlikely(___down_write(sem))) {
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	}

	return 0;
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
107 static inline int __down_write_trylock(struct rw_semaphore *sem)
109 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
110 RWSEM_ACTIVE_WRITE_BIAS);
111 if (ret == RWSEM_UNLOCKED_VALUE)
116 static inline void __up_read(struct rw_semaphore *sem)
120 oldcount = sem->count;
121 sem->count -= RWSEM_ACTIVE_READ_BIAS;
124 __asm__ __volatile__(
133 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
134 :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
136 if (unlikely(oldcount < 0))
137 if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
141 static inline void __up_write(struct rw_semaphore *sem)
145 sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
149 __asm__ __volatile__(
159 :"=&r" (count), "=m" (sem->count), "=&r" (temp)
160 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
/*
 * downgrade write lock to read lock
 */
170 static inline void __downgrade_write(struct rw_semaphore *sem)
174 oldcount = sem->count;
175 sem->count -= RWSEM_WAITING_BIAS;
178 __asm__ __volatile__(
187 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
188 :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
190 if (unlikely(oldcount < 0))
191 rwsem_downgrade_wake(sem);
194 static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
200 __asm__ __volatile__(
208 :"=&r" (temp), "=m" (sem->count)
209 :"Ir" (val), "m" (sem->count));
213 static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
220 __asm__ __volatile__(
229 :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
230 :"Ir" (val), "m" (sem->count));
236 #endif /* __KERNEL__ */
237 #endif /* _ALPHA_RWSEM_H */