/*
 * asm-ia64/rwsem.h: R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds RWSEM_ACTIVE_WRITE_BIAS, the count becomes
 * 0xffff0001 for the case of an uncontended lock. Readers increment by 1
 * and see a positive value when uncontended, negative if there are
 * writers (and maybe readers) waiting, in which case the reader goes
 * to sleep.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed int              count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#if RWSEM_DEBUG
        int                     debug;
#endif
};

#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
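
/*
 * A few example count values, for illustration:
 *
 *      0x00000000      unlocked, nobody waiting
 *      0x00000003      three active readers, nobody waiting
 *      0xffff0001      one active writer, uncontended
 *                      (0 + RWSEM_ACTIVE_WRITE_BIAS)
 *
 * Any negative count means a writer is active or lockers are queued,
 * which is why the fast paths below test the sign of the counter.
 */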

/*
 * initialization
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT      /* */
#endif

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) \
          __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
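/*
 * ia64_fetchadd4_acq() returns the value the counter held *before* the
 * increment, with acquire semantics. A negative old value means a writer
 * was already active or queued, so we take the slow path.
 */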
static inline void
__down_read (struct rw_semaphore *sem)
{
        int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);

        if (result < 0)
                rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
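/*
 * Uncontended case: the cmpxchg loop moves the count from 0 to
 * RWSEM_ACTIVE_WRITE_BIAS (0xffff0001). Any other starting value means
 * the lock was held or contended, so we take the slow path.
 */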
static inline void
__down_write (struct rw_semaphore *sem)
{
        int old, new;

        do {
                old = sem->count;
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_acq(&sem->count, old, new) != old);

        if (old != 0)
                rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 */
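/*
 * ia64_fetchadd4_rel() also returns the pre-decrement value, so
 * "--result" reconstructs the new count. We wake waiters only when the
 * count was negative (someone is queued) and this was the last active
 * locker.
 */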
static inline void
__up_read (struct rw_semaphore *sem)
{
        int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);

        if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
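/*
 * Dropping the write bias with release semantics publishes our critical
 * section. If the resulting count is negative with no active lockers
 * left, tasks are queued and must be woken.
 */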
static inline void
__up_write (struct rw_semaphore *sem)
{
        int old, new;

        do {
                old = sem->count;
                new = old - RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
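/*
 * Retry the cmpxchg as long as the count stays non-negative (readers
 * only); give up as soon as a writer bias makes it negative.
 */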
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
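/*
 * A single cmpxchg suffices here: only the 0 -> RWSEM_ACTIVE_WRITE_BIAS
 * transition can succeed, and any other observed value means contention.
 */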
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
        int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
                              RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
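/*
 * Removing RWSEM_WAITING_BIAS converts the write-locked count into a
 * single-reader count (e.g. 0xffff0001 -> 0x00000001 when uncontended);
 * rwsem_downgrade_wake() then lets any queued readers in behind us.
 */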
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
        int old, new;

        do {
                old = sem->count;
                new = old - RWSEM_WAITING_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (old < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * Implement atomic add functionality. These used to be "inline" functions,
 * but GCC v3.1 doesn't quite optimize this stuff right and ends up with bad
 * calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)    atomic_add(delta, (atomic_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
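
/*
 * These helpers are used by the generic slow paths in lib/rwsem.c, e.g.
 * rwsem_atomic_update() to adjust the count while a task queues itself.
 */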

#endif /* _ASM_IA64_RWSEM_H */