power: improve inline asm memory constraints
include/asm-i386/atomic.h
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

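/*
 * Usage sketch (illustrative, not part of the original header): declaring,
 * initializing, and accessing a counter with the interfaces above;
 * nr_events is a hypothetical name.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 10);
 *	printk("%d\n", atomic_read(&nr_events));
 */
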
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	/*
	 * "=m" names the memory output; the matching "m" input tells gcc
	 * the asm also reads v->counter, so it must use that exact address
	 * (the same constraint pattern recurs throughout this file).
	 */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

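/*
 * Usage sketch (illustrative, not from the original source): dropping
 * several references at once and freeing on the last one; put_object_refs()
 * and free_object() are hypothetical.
 *
 *	static void put_object_refs(struct object *obj, int nr_refs)
 *	{
 *		if (atomic_sub_and_test(nr_refs, &obj->refcount))
 *			free_object(obj);
 *	}
 */
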
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

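/*
 * Usage sketch (illustrative, not from the original source): the classic
 * reference-count release pattern; put_object() and free_object() are
 * hypothetical.
 *
 *	static void put_object(struct object *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			free_object(obj);
 *	}
 */
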
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value of @v
 * (i.e. @i plus the old value).
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 == 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor: xaddl leaves the old value in %0 */
	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor: no xaddl, mask interrupts instead */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

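/*
 * Usage sketch (illustrative, not from the original source): handing out
 * monotonically increasing sequence numbers; seq and next_seq() are
 * hypothetical.
 *
 *	static atomic_t seq = ATOMIC_INIT(0);
 *
 *	static int next_seq(void)
 *	{
 *		return atomic_add_return(1, &seq);	// first caller gets 1
 *	}
 */
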
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

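/*
 * Usage sketch (illustrative, not from the original source): a crude
 * test-and-set lock built on atomic_xchg; lock_flag is hypothetical, and
 * real code should use the kernel's spinlock primitives instead.
 *
 *	static atomic_t lock_flag = ATOMIC_INIT(0);
 *
 *	while (atomic_xchg(&lock_flag, 1))	// spin until we swap 0 -> 1
 *		cpu_relax();
 *	... critical section ...
 *	atomic_set(&lock_flag, 0);		// release
 */
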
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)			\
({							\
	int c, old;					\
	c = atomic_read(v);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = atomic_cmpxchg((v), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

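/*
 * Usage sketch (illustrative, not from the original source): taking a
 * reference only while the object is still live, as in a lockless lookup;
 * obj and its refcount field are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// object is already being torn down
 */
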
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")

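/*
 * Usage sketch (illustrative, not from the original source): atomically
 * setting and clearing a flag bit in a plain word; dev_flags and
 * FLAG_BUSY are hypothetical.
 *
 *	static unsigned long dev_flags;
 *
 *	atomic_set_mask(FLAG_BUSY, &dev_flags);	// dev_flags |= FLAG_BUSY
 *	atomic_clear_mask(FLAG_BUSY, &dev_flags);	// dev_flags &= ~FLAG_BUSY
 */
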
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif