[S390] s390: rename CPU_IDLE to S390_CPU_IDLE
[linux-block.git] include/asm-s390/atomic.h

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>

/*
 * include/asm-s390/atomic.h
 *
 * S390 version
 * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Denis Joseph Barrow,
 *            Arnd Bergmann (arndb@de.ibm.com)
 *
 * Derived from "include/asm-i386/bitops.h"
 * Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

typedef struct {
	int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

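
/*
 * Illustration of the technique above (a sketch only, not part of this
 * header; it assumes a compiler that provides GCC's
 * __sync_val_compare_and_swap builtin): the same compare-and-swap retry
 * idiom that __CS_LOOP codes with "cs", written in plain C.  The name
 * __cs_loop_add_sketch is invented for the example.
 */
#if 0
static inline int __cs_loop_add_sketch(int *counter, int delta)
{
	int old_val, new_val, prev;

	old_val = *counter;			/* "l	%0,%2"		  */
	for (;;) {
		new_val = old_val + delta;	/* "lr" + op_string	  */
		/* "cs" stores new_val only if *counter still equals	  */
		/* old_val, and always returns the value it found there. */
		prev = __sync_val_compare_and_swap(counter, old_val, new_val);
		if (prev == old_val)
			break;			/* swap succeeded	  */
		old_val = prev;			/* lost the race: "jl 0b" */
	}
	return new_val;
}
#endif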

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}
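
/*
 * Usage sketch (illustrative only; the flag bit, the variable and the two
 * helpers below are invented): atomic_set_mask()/atomic_clear_mask() OR a
 * bit into, or AND a bit out of, an atomic_t without taking a lock.
 */
#if 0
static atomic_t pending_flags = ATOMIC_INIT(0);

static inline void mark_pending(void)
{
	atomic_set_mask(0x01, &pending_flags);		/* counter |= 0x01  */
}

static inline void clear_pending(void)
{
	atomic_clear_mask(0x01, &pending_flags);	/* counter &= ~0x01 */
}
#endif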

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
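
/*
 * Semantics sketch (illustrative only; the values and the helper name are
 * arbitrary): atomic_cmpxchg() stores 'new' only when the current value
 * equals 'old', and in either case returns the value it found.
 */
#if 0
static inline void atomic_cmpxchg_demo(void)
{
	atomic_t v = ATOMIC_INIT(5);
	int prev;

	prev = atomic_cmpxchg(&v, 5, 7);	/* prev == 5, v is now 7 */
	prev = atomic_cmpxchg(&v, 5, 9);	/* prev == 7, v stays 7  */
}
#endif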

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
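
/*
 * Usage sketch (illustrative only; "struct foo" and foo_tryget() are made
 * up for the example): atomic_inc_not_zero() takes a reference only while
 * the count has not already dropped to zero, which is the usual pattern
 * for looking up an object that may be concurrently freed.
 */
#if 0
struct foo {
	atomic_t refcount;
};

static inline int foo_tryget(struct foo *f)
{
	/* Non-zero return means a reference was taken. */
	return atomic_inc_not_zero(&f->refcount);
}
#endif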

#undef __CS_LOOP

#ifdef __s390x__
typedef struct {
	long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)	{ (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic64_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic64_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)

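
/*
 * Usage sketch (illustrative only; the counter and the helpers below are
 * invented): on 64-bit s390 an atomic64_t can carry a wide event counter
 * that is updated from several CPUs without locking.
 */
#if 0
static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static inline void account_page(void)
{
	atomic64_add(4096, &bytes_transferred);
}

static inline int over_4gib(void)
{
	return atomic64_read(&bytes_transferred) > (1LL << 32);
}
#endif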

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */