#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
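
/*
 * All of these operations are built on the same pattern: lwarx loads
 * the word and places a reservation on its cache line, and stwcx.
 * stores the new value only if the reservation still stands, setting
 * CR0 so that bne- retries the loop after a lost race.  PPC405_ERR77()
 * expands to a dcbt of the target on CONFIG_IBM405_ERR77 builds (a
 * workaround for a PPC405 stwcx. erratum) and to nothing elsewhere.
 * Roughly, as a C sketch with pseudo-helpers:
 *
 *	do {
 *		t = load_reserved(&v->counter);		// lwarx
 *		t += a;
 *	} while (!store_conditional(&v->counter, t));	// stwcx./bne-
 */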
35 | ||
36 | static __inline__ int atomic_add_return(int a, atomic_t *v) | |
37 | { | |
38 | int t; | |
39 | ||
40 | __asm__ __volatile__( | |
144b9c13 | 41 | LWSYNC_ON_SMP |
1da177e4 LT |
42 | "1: lwarx %0,0,%2 # atomic_add_return\n\ |
43 | add %0,%1,%0\n" | |
44 | PPC405_ERR77(0,%2) | |
45 | " stwcx. %0,0,%2 \n\ | |
46 | bne- 1b" | |
feaf7cf1 | 47 | ISYNC_ON_SMP |
1da177e4 LT |
48 | : "=&r" (t) |
49 | : "r" (a), "r" (&v->counter) | |
50 | : "cc", "memory"); | |
51 | ||
52 | return t; | |
53 | } | |
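
/*
 * The value-returning variants behave as full memory barriers:
 * LWSYNC_ON_SMP emits lwsync (or sync on cores without lwsync) before
 * the reservation loop and ISYNC_ON_SMP emits isync after it on SMP
 * builds; both expand to nothing on UP.  The "memory" clobber stops
 * the compiler from reordering accesses across the operation.
 */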

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
71 | ||
72 | static __inline__ int atomic_sub_return(int a, atomic_t *v) | |
73 | { | |
74 | int t; | |
75 | ||
76 | __asm__ __volatile__( | |
144b9c13 | 77 | LWSYNC_ON_SMP |
1da177e4 LT |
78 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ |
79 | subf %0,%1,%0\n" | |
80 | PPC405_ERR77(0,%2) | |
81 | " stwcx. %0,0,%2 \n\ | |
82 | bne- 1b" | |
feaf7cf1 | 83 | ISYNC_ON_SMP |
1da177e4 LT |
84 | : "=&r" (t) |
85 | : "r" (a), "r" (&v->counter) | |
86 | : "cc", "memory"); | |
87 | ||
88 | return t; | |
89 | } | |

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}
105 | ||
106 | static __inline__ int atomic_inc_return(atomic_t *v) | |
107 | { | |
108 | int t; | |
109 | ||
110 | __asm__ __volatile__( | |
144b9c13 | 111 | LWSYNC_ON_SMP |
1da177e4 LT |
112 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ |
113 | addic %0,%0,1\n" | |
114 | PPC405_ERR77(0,%1) | |
115 | " stwcx. %0,0,%1 \n\ | |
116 | bne- 1b" | |
feaf7cf1 | 117 | ISYNC_ON_SMP |
1da177e4 LT |
118 | : "=&r" (t) |
119 | : "r" (&v->counter) | |
120 | : "cc", "memory"); | |
121 | ||
122 | return t; | |
123 | } | |

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
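
/*
 * Note that atomic_inc_and_test() is true only when the counter was
 * -1 before the call, i.e. the increment brought it to exactly zero.
 */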
134 | ||
135 | static __inline__ void atomic_dec(atomic_t *v) | |
136 | { | |
137 | int t; | |
138 | ||
139 | __asm__ __volatile__( | |
140 | "1: lwarx %0,0,%2 # atomic_dec\n\ | |
141 | addic %0,%0,-1\n" | |
142 | PPC405_ERR77(0,%2)\ | |
143 | " stwcx. %0,0,%2\n\ | |
144 | bne- 1b" | |
e2a3d402 LT |
145 | : "=&r" (t), "+m" (v->counter) |
146 | : "r" (&v->counter) | |
1da177e4 LT |
147 | : "cc"); |
148 | } | |
149 | ||
150 | static __inline__ int atomic_dec_return(atomic_t *v) | |
151 | { | |
152 | int t; | |
153 | ||
154 | __asm__ __volatile__( | |
144b9c13 | 155 | LWSYNC_ON_SMP |
1da177e4 LT |
156 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ |
157 | addic %0,%0,-1\n" | |
158 | PPC405_ERR77(0,%1) | |
159 | " stwcx. %0,0,%1\n\ | |
160 | bne- 1b" | |
feaf7cf1 | 161 | ISYNC_ON_SMP |
1da177e4 LT |
162 | : "=&r" (t) |
163 | : "r" (&v->counter) | |
164 | : "cc", "memory"); | |
165 | ||
166 | return t; | |
167 | } | |

#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
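
/*
 * A common cmpxchg idiom, as a hypothetical sketch: read the old
 * value, compute an update, and retry if another CPU raced with us.
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = compute_update(old);	// hypothetical helper
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */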

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3\n\
	beq-	2f\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%1)	/* operands must name the stwcx. target */
"	stwcx.	%0,0,%1\n\
	bne-	1b\n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
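
/*
 * Typical use, as a hypothetical sketch: take a reference only while
 * the object is still live (its refcount has not already hit zero).
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;	// object is being torn down
 */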
205 | ||
1da177e4 LT |
206 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) |
207 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) | |
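
/*
 * Typical use, as a hypothetical sketch: drop a reference and destroy
 * the object when the last one goes away.
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		obj_destroy(obj);	// hypothetical cleanup hook
 */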
208 | ||
209 | /* | |
210 | * Atomically test *v and decrement if it is greater than 0. | |
434f98c4 RJ |
211 | * The function returns the old value of *v minus 1, even if |
212 | * the atomic variable, v, was not decremented. | |
1da177e4 LT |
213 | */ |
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
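
/*
 * The "b" constraint keeps t out of r0: addi treats r0 as the literal
 * value 0, which would corrupt the computation.  A hypothetical usage
 * sketch, as a trylock-style counted semaphore:
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;	// no units left; counter unchanged
 */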
235 | ||
feaf7cf1 BB |
236 | #define smp_mb__before_atomic_dec() smp_mb() |
237 | #define smp_mb__after_atomic_dec() smp_mb() | |
238 | #define smp_mb__before_atomic_inc() smp_mb() | |
239 | #define smp_mb__after_atomic_inc() smp_mb() | |
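
/*
 * atomic_inc() and atomic_dec() provide no ordering of their own, so
 * these hooks must be full smp_mb() barriers on powerpc.
 */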

#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
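
/*
 * The 64-bit operations below mirror the 32-bit ones, using
 * ldarx/stdcx. doubleword reservations in place of lwarx/stwcx.
 * The PPC405 erratum workaround is unnecessary here: the 405 is a
 * 32-bit core and never builds this code.
 */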
249 | ||
250 | static __inline__ void atomic64_add(long a, atomic64_t *v) | |
251 | { | |
252 | long t; | |
253 | ||
254 | __asm__ __volatile__( | |
255 | "1: ldarx %0,0,%3 # atomic64_add\n\ | |
256 | add %0,%2,%0\n\ | |
257 | stdcx. %0,0,%3 \n\ | |
258 | bne- 1b" | |
e2a3d402 LT |
259 | : "=&r" (t), "+m" (v->counter) |
260 | : "r" (a), "r" (&v->counter) | |
06a98dba SR |
261 | : "cc"); |
262 | } | |

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3\n\
	beq-	2f\n\
	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n\
	bne-	1b\n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __powerpc64__ */
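
/*
 * The generic header included below supplies the atomic_long_t
 * wrappers built on top of the atomic_t/atomic64_t operations
 * defined in this file.
 */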

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */