arch/arc/include/asm/atomic.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

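/*
 * Rough sketch of what ATOMIC_OPS(add, +=, add) further below generates from
 * this macro (illustration only, not part of the generated code):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		...);
 *	}
 *
 * LLOCK marks the location, SCOND stores only if that reservation still
 * holds, and the BNZ retries the whole sequence when the store-conditional
 * fails because another agent touched the counter in between.
 */
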
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

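/*
 * Note on return values: atomic_##op##_return() hands back the *new* value
 * left in the counter, while atomic_fetch_##op() hands back the *old* value
 * sampled by LLOCK before the update.  Rough usage sketch (illustration only):
 *
 *	atomic_t cnt = ATOMIC_INIT(1);
 *
 *	int ret = atomic_add_return(2, &cnt);	// ret == 3, cnt == 3
 *	int old = atomic_fetch_add(2, &cnt);	// old == 3, cnt == 5
 */
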
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
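
/*
 * Rough illustration of the hazard described above (sketch only): without
 * taking atomic_ops_lock(), a bare store from CPU1 could be silently lost
 * when it lands inside CPU0's lock-protected read-modify-write:
 *
 *	CPU0: atomic_add(1, v)			CPU1: atomic_set(v, 10)
 *	  lock; temp = v->counter;	(reads 0)
 *						  v->counter = 10;
 *	  v->counter = temp + 1;	(writes 1, the 10 is lost)
 *	  unlock;
 */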

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

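/*
 * For reference, each ATOMIC_OPS() instantiation above generates three inline
 * functions from whichever backend (LL/SC or lock-based) was selected, e.g.
 * ATOMIC_OPS(add, +=, add) yields:
 *
 *	void atomic_add(int i, atomic_t *v);		// no return value
 *	int  atomic_add_return(int i, atomic_t *v);	// returns new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	// returns old value
 */
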
#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

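/*
 * The .di qualifier above is the ARC cache-bypass ("direct") access mode;
 * presumably it is used here so that plain reads and writes of the counter
 * stay coherent with the EZNPS hardware that performs the atomic operations
 * outside the CPU's data cache.
 */
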
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

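/*
 * The EZNPS backend emits a custom CTOP atomic instruction by encoding it
 * literally with ".word %2" (asm_op is an "i" immediate holding the opcode).
 * The operands are first pinned into r2/r3 because the canned encodings
 * operate on those fixed registers, which is why r2 and r3 appear in the
 * clobber list.
 */
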
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

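/*
 * Judging by the ATOMIC_FETCH_OP() flavour below, which returns this value
 * directly as the "old" value, the CTOP instruction leaves the pre-op counter
 * value in r2; atomic_##op##_return() therefore applies c_op to it in C to
 * reconstruct the new value it has to return.
 */
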
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif	/* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
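
/*
 * Rough usage sketch (illustration only): the cmpxchg loop above retries
 * until either the add succeeds or the counter is observed holding @u.
 * Callers normally reach it through atomic_add_unless()/atomic_inc_not_zero(),
 * e.g. taking a reference only while an object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// refcnt already hit 0, object is going away
 */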

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */