x86: Cleanup rwsem_count_t typedef
[linux-2.6-block.git] / arch / sparc / include / asm / rwsem.h
CommitLineData
a00736e9
SR
1/*
2 * rwsem.h: R/W semaphores implemented using CAS
3 *
4 * Written by David S. Miller (davem@redhat.com), 2001.
5 * Derived from asm-i386/rwsem.h
6 */
7#ifndef _SPARC64_RWSEM_H
8#define _SPARC64_RWSEM_H
9
10#ifndef _LINUX_RWSEM_H
11#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
12#endif
13
14#ifdef __KERNEL__
/*
 * The 64-bit ->count encodes both reader and writer state:
 * the low 32 bits (RWSEM_ACTIVE_MASK) count active lockers, while
 * RWSEM_WAITING_BIAS (a negative high-word bias) is added whenever a
 * writer holds or waits for the lock.  Zero means unlocked.
 */
struct rw_semaphore {
	signed long count;	/* see bias macros below for the encoding */
#define RWSEM_UNLOCKED_VALUE 0x00000000L
#define RWSEM_ACTIVE_BIAS 0x00000001L
#define RWSEM_ACTIVE_MASK 0xffffffffL
#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* protects wait_list */
	struct list_head wait_list;	/* tasks sleeping on this rwsem */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking */
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

/* Static initializer: unlocked, empty wait queue. */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
42
/*
 * Slow-path entry points, implemented out of line; each is called from
 * the corresponding inline fast path below when contention is detected.
 */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * Runtime initializer.  The static lock_class_key gives each init_rwsem()
 * call site its own lockdep class.
 */
#define init_rwsem(sem) \
do { \
	static struct lock_class_key __key; \
	 \
	__init_rwsem((sem), #sem, &__key); \
} while (0)
57
9b3bb86a
DM
/*
 * lock for reading
 *
 * Atomically bump the active count by the read bias (+1).  A result <= 0
 * means the waiting bias is set (a writer holds or waits for the lock),
 * so fall into the slow path and sleep.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}
66
67static inline int __down_read_trylock(struct rw_semaphore *sem)
68{
69 long tmp;
70
71 while ((tmp = sem->count) >= 0L) {
72 if (tmp == cmpxchg(&sem->count, tmp,
73 tmp + RWSEM_ACTIVE_READ_BIAS)) {
74 return 1;
75 }
76 }
77 return 0;
78}
a00736e9 79
/*
 * lock for writing
 *
 * Add the full write bias (waiting bias + one active count) in a single
 * atomic step.  The result equals RWSEM_ACTIVE_WRITE_BIAS only if the
 * semaphore was previously unlocked; anything else means contention and
 * we take the slow path.  @subclass is unused here -- lockdep subclass
 * annotation is handled by the generic wrapper.
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
92
/* Plain write lock: nesting subclass 0. */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
97
98static inline int __down_write_trylock(struct rw_semaphore *sem)
99{
100 long tmp;
101
102 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
103 RWSEM_ACTIVE_WRITE_BIAS);
104 return tmp == RWSEM_UNLOCKED_VALUE;
a00736e9
SR
105}
106
/*
 * unlock after reading
 *
 * Drop our read bias.  If the result is negative (writers are queued:
 * tmp < -1) and the active count just hit zero, we were the last active
 * locker, so wake a waiter.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
118
/*
 * unlock after writing
 *
 * Remove the full write bias; a negative result means other tasks queued
 * behind us (they added their own waiting bias), so wake one up.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}
128
/*
 * implement atomic add functionality
 *
 * Used by the generic slow path to adjust ->count without returning the
 * new value.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}
136
/*
 * downgrade write lock to read lock
 *
 * Cancel the waiting bias, converting the write hold (waiting bias + 1)
 * into a plain read hold (+1).  A still-negative result means waiters
 * remain queued -- readers among them can now run, so wake them.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
148
/*
 * implement exchange and add functionality
 *
 * Atomically add @delta to ->count and return the new value; used by the
 * generic slow path.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
156
157static inline int rwsem_is_locked(struct rw_semaphore *sem)
158{
159 return (sem->count != 0);
160}
161
162#endif /* __KERNEL__ */
163
164#endif /* _SPARC64_RWSEM_H */