/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}

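/*
 * A minimal usage sketch (the names here are hypothetical, not part
 * of this header): because atomic_add_return() acts as a full
 * barrier, a caller can both publish data before the count update and
 * act on the result afterwards without extra fencing:
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	events[i] = ev;
 *	if (atomic_add_return(1, &nr_events) == batch_size)
 *		flush_events();
 *
 * Neither the store to events[] nor the flush can be reordered across
 * the fetchadd.
 */
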
#define ATOMIC_OPS(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int val;						\
	smp_mb();						\
	val = __insn_fetch##op##4((void *)&v->counter, i);	\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	__insn_fetch##op##4((void *)&v->counter, i);		\
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS

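/*
 * For reference, ATOMIC_OPS(add) above expands (modulo whitespace) to:
 *
 *	static inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		int val;
 *		smp_mb();
 *		val = __insn_fetchadd4((void *)&v->counter, i);
 *		smp_mb();
 *		return val;
 *	}
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__insn_fetchadd4((void *)&v->counter, i);
 *	}
 *
 * The value-returning form is bracketed by full barriers; the void
 * form has no ordering requirement and issues just the fetchadd.
 */
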
static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

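/*
 * There is no fetchxor instruction, so the xor ops are built from a
 * compare-exchange retry loop: the expected old value is staged in
 * the SPR_CMPEXCH_VALUE register, and __insn_cmpexch4() stores the
 * new value only if the word still holds the staged value.  As a
 * sketch, the same loop written with the generic cmpxchg() helper
 * (as __atomic_add_unless() below does) would be:
 *
 *	int guess, oldval = v->counter;
 *	do {
 *		guess = oldval;
 *		oldval = cmpxchg(&v->counter, guess, guess ^ i);
 *	} while (guess != oldval);
 */
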
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

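/*
 * Note that __atomic_add_unless() returns the old value, not a
 * success flag; <linux/atomic.h> wraps it as atomic_add_unless(),
 * which is true if the add happened.  A typical (hypothetical)
 * pattern is taking a reference only while the count is nonzero:
 *
 *	if (!atomic_add_unless(&obj->refcnt, 1, 0))
 *		return NULL;	(object is already being torn down)
 */
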
/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}

#define ATOMIC64_OPS(op)					\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)	\
{								\
	long val;						\
	smp_mb();						\
	val = __insn_fetch##op((void *)&v->counter, i);		\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic64_##op(long i, atomic64_t *v)	\
{								\
	__insn_fetch##op((void *)&v->counter, i);		\
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

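/*
 * These derived forms keep the common refcounting idioms cheap; for
 * example, a (hypothetical) release path frees its object on the
 * final drop:
 *
 *	if (atomic64_dec_and_test(&obj->usage))
 *		kfree(obj);
 *
 * which boils down to a single fetchadd of -1 plus the barrier
 * handling described in atomic_add_return() above.
 */
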
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */