#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 * S390 uses 'Compare And Swap' for atomicity in SMP environment
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

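/*
 * Usage sketch ("pressure" is a hypothetical counter name):
 *
 *	static atomic_t pressure = ATOMIC_INIT(0);
 */
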
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

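/*
 * __CS_LOOP implements a compare-and-swap retry loop: load the old
 * value, apply op_string (e.g. "ar" to add) to a copy, then let CS
 * store the result only if the counter is still unchanged; if another
 * CPU got in between, the condition code is "low" and "jl 0b" retries.
 * The first variant uses the "Q" memory constraint understood by newer
 * gcc; the fallback below addresses the counter through a base
 * register instead.
 */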
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

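/*
 * Aligned int loads and stores are atomic on s390, so atomic_read and
 * atomic_set need no serialized instruction; barrier() is only a
 * compiler barrier that forces the access to really happen here.
 */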
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

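/*
 * atomic_cmpxchg maps directly onto a single CS instruction: CS
 * compares "old" with the counter, stores "new" only if they match,
 * and in either case leaves the value actually found in "old", which
 * is what gets returned.
 */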
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

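/*
 * atomic_add_unless is an optimistic cmpxchg loop: re-read the counter
 * whenever the compare-and-swap loses a race, and give up only when
 * the forbidden value u is seen.  Returns non-zero if the add was done.
 */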
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

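/*
 * Usage sketch (struct my_obj and its refcnt field are hypothetical):
 * take a reference only if the count has not already dropped to zero.
 *
 *	static int get_obj(struct my_obj *obj)
 *	{
 *		return atomic_inc_not_zero(&obj->refcnt);
 *	}
 */
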
#undef __CS_LOOP

#ifdef __s390x__
#define ATOMIC64_INIT(i)  { (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

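/*
 * __CSG_LOOP is the 64-bit counterpart of __CS_LOOP: the same retry
 * scheme, but using LG/LGR/CSG on the full 64-bit counter.
 */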
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */

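/*
 * No lighter-weight barrier is provided here: callers pair these hooks
 * with atomic_inc/atomic_dec, and this implementation simply falls
 * back to a full smp_mb() on either side.
 */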
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */