#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)  { (i) }

#define __CS_LOOP(ptr, op_val, op_string) ({			\
	int old_val, new_val;					\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val),		\
		  "=Q" (((atomic_t *)(ptr))->counter)		\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
		: "cc", "memory");				\
	new_val;						\
})

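/*
 * __CS_LOOP() implements a load / modify / compare-and-swap retry loop:
 * the old value is loaded, op_string is applied into a scratch register,
 * and CS stores the result only if the counter still holds the old value,
 * branching back to retry otherwise.  A rough user-space sketch of the
 * same logic in plain C (illustrative only, not part of this header;
 * __sync_val_compare_and_swap is a GCC builtin standing in for CS):
 *
 *	static int cs_loop_add(int *counter, int op_val)
 *	{
 *		int old_val, new_val;
 *
 *		do {
 *			old_val = *counter;		(l   %0,%2)
 *			new_val = old_val + op_val;	(lr, then ar)
 *		} while (__sync_val_compare_and_swap(counter, old_val,
 *						     new_val) != old_val);
 *		return new_val;
 *	}
 */
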
static inline int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE(v->counter);
}
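
/*
 * ACCESS_ONCE() (a volatile cast) forces the compiler to emit exactly one
 * load of ->counter here rather than caching or re-reading the value.
 */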

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
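
/*
 * All of the inc/dec/test helpers above are thin wrappers around
 * atomic_add_return()/atomic_sub_return(); even the void flavours such as
 * atomic_inc() go through the same CS loop and return path.
 */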

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}
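
/*
 * atomic_clear_mask() ANDs the counter with ~mask ("nr"), clearing the
 * bits set in mask, while atomic_set_mask() ORs the mask in ("or").
 * Since the counter is an int, only the low 32 bits of the unsigned long
 * mask can take effect.
 */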

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}
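
/*
 * A single CS maps directly onto cmpxchg semantics: %0 (old) is compared
 * with v->counter; on a match the counter is replaced by new, otherwise
 * %0 is reloaded with the current counter value, which is then returned.
 */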

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)

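/*
 * atomic_add_unless() retries the cmpxchg until it either observes the
 * forbidden value u (adding nothing) or wins the race; it returns non-zero
 * iff the add happened.  The classic use of the atomic_inc_not_zero()
 * wrapper is the lookup-side reference grab, e.g. (illustrative sketch,
 * obj/refcnt are hypothetical names):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already being torn down)
 */
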
#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

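/*
 * 64-bit counterpart of __CS_LOOP: LG/LGR/CSG operate on the full 64-bit
 * counter, otherwise the retry structure is identical.
 */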
#define __CSG_LOOP(ptr, op_val, op_string) ({			\
	long long old_val, new_val;				\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val),		\
		  "=Q" (((atomic64_t *)(ptr))->counter)		\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter) \
		: "cc", "memory");				\
	new_val;						\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE(v->counter);
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}
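
/*
 * Without 64-bit registers a counter is held in an even/odd register pair
 * (register_pair): LM/STM move both halves at once, and CDS (compare
 * double and swap) below is the atomic 64-bit update primitive.
 */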

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

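/*
 * The remaining 64-bit operations are open-coded cmpxchg retry loops,
 * mirroring in C what __CSG_LOOP does in assembly on 64-bit kernels.
 */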
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* clear the bits set in mask, as "ngr" does above */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

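/*
 * atomic64_dec_if_positive() stores the decrement only while the result
 * stays >= 0; it returns the decremented value, so a negative return means
 * the counter was left untouched.
 */
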
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */