/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"		/* load exclusive of v->counter */
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"		/* store exclusive; %w1 == 0 on success */
"	cbnz	%w1, 1b"		/* retry if the store exclusive failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}
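
/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * ldxr/stxr retry loop above is the load-linked/store-conditional form
 * of a compare-and-swap loop. Written with the GCC __atomic builtins,
 * which this header does not actually use, atomic_add() behaves like:
 */
static inline void atomic_add_sketch(int i, atomic_t *v)
{
	int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

	/* on failure, 'old' is refreshed with the value actually observed */
	while (!__atomic_compare_exchange_n(&v->counter, &old, old + i,
					    false, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED))
		;
}
/*
 * One design difference: any intervening store clears the exclusive
 * monitor, so the asm loop retries even on an A-B-A change that a
 * plain compare-and-swap cannot detect.
 */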

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldaxr	%w0, %2\n"		/* acquire semantics on the load */
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"		/* release semantics on the store */
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldaxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}
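
/*
 * Illustrative usage (hypothetical helper, not from this header): the
 * value-returning variants use ldaxr/stlxr plus a "memory" clobber, so
 * a final reference drop is ordered against the caller's earlier writes
 * to the object:
 */
static inline int put_ref_sketch(atomic_t *refcount)
{
	/* non-zero when the caller dropped the last reference */
	return atomic_sub_return(1, refcount) == 0;
}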

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1:	ldaxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"			/* current value != old: no store */
"	stlxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"		/* exclusive store failed: retry */
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}
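
/*
 * Illustrative sketch (hypothetical helper): atomic_cmpxchg() returns
 * the value it observed, which enables the usual reload-and-retry
 * pattern, e.g. a lock-free running maximum:
 */
static inline void atomic_max_sketch(atomic_t *v, int new)
{
	int old = atomic_read(v);

	while (old < new) {
		int prev = atomic_cmpxchg(v, old, new);

		if (prev == old)
			break;		/* we installed 'new' */
		old = prev;		/* lost the race; re-check */
	}
}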

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	asm volatile("// atomic_clear_mask\n"
"1:	ldxr	%0, %2\n"
"	bic	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
	: "Ir" (mask)
	: "cc");
}
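
/*
 * Illustrative usage (hypothetical helper): unlike the other ops here,
 * atomic_clear_mask() works on a plain unsigned long, e.g. to retire
 * the low bits of a flags word in place:
 */
static inline void clear_low_byte_sketch(unsigned long *flags)
{
	/* atomically clear bits 0-7; all other bits are preserved */
	atomic_clear_mask(0xffUL, flags);
}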

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/* Atomically add 'a' to v unless it equals 'u'; returns the old value. */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
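
/*
 * Illustrative usage (hypothetical helper): the classic use of
 * __atomic_add_unless() is taking a reference only while the count is
 * still non-zero, i.e. while the object is not already being freed:
 */
static inline int get_ref_unless_zero_sketch(atomic_t *refcount)
{
	/* non-zero means the reference was taken */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}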

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
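
/*
 * Illustrative usage (hypothetical helper): atomic_dec() itself is
 * unordered (plain ldxr/stxr), so a caller that must make prior stores
 * visible before the decrement needs the barrier explicitly:
 */
static inline void publish_and_signal_sketch(int *data, atomic_t *pending)
{
	*data = 1;			/* publish the result... */
	smp_mb__before_atomic_dec();	/* ...before the count is seen to drop */
	atomic_dec(pending);
}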

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldaxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldaxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldaxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stlxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldaxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"			/* result went negative: skip the store */
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
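
/*
 * Illustrative usage (hypothetical helper): the decrement is committed
 * only while the result stays non-negative, so the return value tells
 * the caller whether anything was actually taken:
 */
static inline int take_token_sketch(atomic64_t *tokens)
{
	/* negative result: the pool was already empty, nothing taken */
	return atomic64_dec_if_positive(tokens) >= 0;
}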

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	/* unlike __atomic_add_unless(), returns whether the add happened */
	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif