/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return temp;							\
}

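/*
 * For illustration (a sketch, not part of the header's API): with
 * CONFIG_ARC_HAS_LLSC, ATOMIC_OP(add, +=, add) above expands to roughly
 * the following. Note that the bare op, unlike the _return variant,
 * issues no smp_mb() pair, as it returns nothing to order against:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %0, [%1]	\n"	(load-locked counter)
 *		"	add     %0, %0, %2	\n"	(temp += i)
 *		"	scond   %0, [%1]	\n"	(store-conditional)
 *		"	bnz     1b		\n"	(retry if scond failed)
 *		: "=&r"(temp)
 *		: "r"(&v->counter), "ir"(i)
 *		: "cc");
 *	}
 */
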
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
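
/*
 * A sketch of the interleaving the lock above guards against: if
 * atomic_set() stored to v->counter without taking atomic_ops_lock(),
 * the plain store could land in the middle of an emulated
 * read-modify-write and be silently lost:
 *
 *	CPU A: atomic_add_return(i, v)	CPU B: atomic_set(v, j)
 *	  lock; temp = v->counter;
 *					  v->counter = j;  (no lock taken)
 *	  v->counter = temp + i; unlock;   (B's store is lost)
 *
 * With the lock, B's store serializes either before or after A's whole
 * sequence.
 */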

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

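/*
 * For illustration (a sketch): in this !CONFIG_ARC_HAS_LLSC flavor the
 * same ATOMIC_OP_RETURN template expands, e.g. for "add", to roughly:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags, temp;
 *
 *		atomic_ops_lock(flags);	(irq-off on UP, spinlock on SMP)
 *		temp = v->counter;
 *		temp += i;
 *		v->counter = temp;
 *		atomic_ops_unlock(flags);
 *
 *		return temp;
 *	}
 */
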
#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

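/*
 * Whichever flavor was selected above, the three instantiations
 * generate atomic_add(), atomic_add_return(), atomic_sub(),
 * atomic_sub_return() and atomic_and(); "and" gets no _return form,
 * since only ATOMIC_OP is expanded for it.
 */
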
#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
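
/*
 * Typical use (a sketch; "obj" and "refcnt" are hypothetical names):
 * take a reference only if the count has not already dropped to zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		... got a reference ...
 *
 * which is what atomic_inc_not_zero() below amounts to, via the generic
 * atomic_add_unless() in <linux/atomic.h>.
 */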

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif