#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

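/*
 * All of the read-modify-write operations below follow the same
 * load-reserve/store-conditional pattern: lwarx (ldarx for 64-bit)
 * loads the counter and establishes a reservation, the updated value
 * is written back with stwcx. (stdcx.), and "bne- 1b" retries the
 * loop if the store-conditional failed because the reservation was
 * lost to another writer.
 */
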
/*
 * Because *_return_relaxed and {cmp}xchg_relaxed end with a "bne-"
 * instruction, an isync is enough as an acquire barrier on platforms
 * without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

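/*
 * A sketch of how these helpers are meant to be used (the
 * authoritative definitions live in include/linux/atomic.h): given
 * the _relaxed variant of an op, the acquire form is composed
 * roughly as
 *
 *	#define atomic_add_return_acquire(...)			\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 */
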
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

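/*
 * The %Un and %Xn operand modifiers in the asm templates above are
 * powerpc-specific GCC modifiers; as we understand them, they let the
 * compiler emit the update-form ("u") or indexed-form ("x") suffix of
 * the load/store when the chosen memory operand calls for it.
 */
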
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
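
/*
 * For the reader, a hand-expanded sketch (not part of the source):
 * ATOMIC_OPS(add, add) above generates
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * each wrapping a lwarx/stwcx. retry loop whose ALU instruction is the
 * second macro argument ("add", or "subf" for the sub family).
 */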

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

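/*
 * In the increment/decrement loops below, addic updates the carry (CA)
 * bit in the XER, which is why "xer" appears in the clobber list next
 * to "cc".
 */
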
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

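/*
 * The self-referential defines above tell the generic layer in
 * include/linux/atomic.h that this architecture provides its own
 * relaxed variants, so the generic fallbacks are not generated.
 */
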
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

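/*
 * A cmpxchg usage sketch (hypothetical caller; LIMIT is whatever bound
 * the caller enforces): a bounded increment built from a retry loop,
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		new = min(old + 1, LIMIT);
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */
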
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}

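/*
 * Note the "subf" on the success path above: after the stwcx. succeeds
 * the register holds old + a, so subtracting a recovers the old value
 * that the interface must return. The PPC_ATOMIC_ENTRY/EXIT_BARRIERs
 * give the operation the full ordering required of non-relaxed atomics.
 *
 * A usage sketch (hypothetical composition; the generic wrapper lives
 * in include/linux/atomic.h):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */
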
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

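/*
 * Typical use (hypothetical caller): take a reference only while the
 * object is still live, e.g.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object already being torn down)
 */
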
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive

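/*
 * Note on atomic_dec_if_positive(): the "=&b" constraint requests a
 * base register other than r0, because r0 used as the addi source
 * operand reads as the literal 0 rather than the register's contents.
 */
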
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

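/*
 * No PPC405_ERR77 workaround is needed in the 64-bit variants: that
 * erratum is specific to the 32-bit PPC405 core, which never executes
 * this __powerpc64__ code.
 */
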
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */