/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	__asm__ __volatile__ ( \
		"amo" #asm_op "." #asm_type " zero, %1, %0" \
		: "+A" (v->counter) \
		: "r" (I) \
		: "memory"); \
}
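/*
 * For reference, ATOMIC_OP(add, add, +, i, w, int, ) expands to roughly:
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ ("amoadd.w zero, %1, %0"
 *			: "+A" (v->counter) : "r" (i) : "memory");
 *	}
 *
 * i.e. a single AMO whose result is discarded into the zero register.
 */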

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP (op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \
	ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
ATOMIC_OPS(and, and, &, i)
ATOMIC_OPS( or, or, |, i)
ATOMIC_OPS(xor, xor, ^, i)
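
/*
 * The instantiations above generate atomic_add(), atomic_sub(), atomic_and(),
 * atomic_or() and atomic_xor(), plus the atomic64_* counterparts when
 * CONFIG_GENERIC_ATOMIC64 is not set.  Note that sub is implemented as an
 * amoadd of the negated operand, since the A extension has no amosub
 * instruction.
 */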

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		"amo" #asm_op "." #asm_type #asm_or " %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
}
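/*
 * For reference, ATOMIC_FETCH_OP(add, add, +, i, .aq, _acquire, w, int, )
 * expands to roughly:
 *
 *	static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v)
 *	{
 *		register int ret;
 *		__asm__ __volatile__ ("amoadd.w.aq %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret) : "r" (i) : "memory");
 *		return ret;
 *	}
 */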

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op##c_or(i, v) c_op I; \
}
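/*
 * So, for example, atomic_add_return(i, v) is atomic_fetch_add(i, v) + i:
 * the AMO hands back the old value and the new value is recomputed in C.
 */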

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(add, add, +, i, , _relaxed)
ATOMIC_OPS(add, add, +, i, .aq , _acquire)
ATOMIC_OPS(add, add, +, i, .rl , _release)
ATOMIC_OPS(add, add, +, i, .aqrl, )

ATOMIC_OPS(sub, add, +, -i, , _relaxed)
ATOMIC_OPS(sub, add, +, -i, .aq , _acquire)
ATOMIC_OPS(sub, add, +, -i, .rl , _release)
ATOMIC_OPS(sub, add, +, -i, .aqrl, )
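
/*
 * Each group of four ATOMIC_OPS() lines above therefore yields the _relaxed,
 * _acquire, _release and fully-ordered flavours of both the fetch and the
 * return form, e.g. atomic_fetch_add_relaxed(), atomic_add_return_acquire(),
 * atomic_sub_return() and (on 64-bit) atomic64_fetch_sub_release().
 */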

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
	ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
#endif

ATOMIC_OPS(and, and, &, i, , _relaxed)
ATOMIC_OPS(and, and, &, i, .aq , _acquire)
ATOMIC_OPS(and, and, &, i, .rl , _release)
ATOMIC_OPS(and, and, &, i, .aqrl, )

ATOMIC_OPS( or, or, |, i, , _relaxed)
ATOMIC_OPS( or, or, |, i, .aq , _acquire)
ATOMIC_OPS( or, or, |, i, .rl , _release)
ATOMIC_OPS( or, or, |, i, .aqrl, )

ATOMIC_OPS(xor, xor, ^, i, , _relaxed)
ATOMIC_OPS(xor, xor, ^, i, .aq , _acquire)
ATOMIC_OPS(xor, xor, ^, i, .rl , _release)
ATOMIC_OPS(xor, xor, ^, i, .aqrl, )

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * These are the extra atomic operations constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a barrier, but they're OK this way because
 * atomic_*_return is also required to perform a barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
}
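/*
 * With the instantiations below, e.g. atomic_sub_and_test(i, v) becomes
 * atomic_sub_return(i, v) == 0 and atomic_add_negative(i, v) becomes
 * atomic_add_return(i, v) < 0.
 */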

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I) \
	ATOMIC_OP (op, func_op, comp_op, I, int, )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I) \
	ATOMIC_OP (op, func_op, comp_op, I, int, ) \
	ATOMIC_OP (op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add, <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS

#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
	atomic##prefix##_##func_op(I, v); \
}

#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##func_op(I, v); \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op(v) c_op I; \
}
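/*
 * So atomic_inc(v) is atomic_add(1, v), atomic_fetch_dec(v) is
 * atomic_fetch_add(-1, v), atomic_inc_return(v) is atomic_fetch_inc(v) + 1,
 * and so on for the variants instantiated below.
 */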

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP (op, asm_op, c_op, I, int, ) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP (op, asm_op, c_op, I, int, ) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
	ATOMIC_OP (op, asm_op, c_op, I, long, 64) \
	ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +, 1)
ATOMIC_OPS(dec, add, +, -1)

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_##func_op##_return(v) comp_op I; \
}
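/*
 * e.g. the instantiations below make atomic_inc_and_test(v) expand to
 * atomic_inc_return(v) == 0, and likewise for the dec and atomic64_ forms.
 */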

ATOMIC_OP(inc_and_test, inc, ==, 0, )
ATOMIC_OP(dec_and_test, dec, ==, 0, )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP

/* This is required to provide a barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl %[p], %[c]\n\t"
		"beq %[p], %[u], 1f\n\t"
		"add %[rc], %[p], %[a]\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
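/*
 * __atomic_add_unless() returns the value the counter held before the
 * operation; comparing that against @u tells the caller (e.g. the generic
 * atomic_add_unless() wrapper) whether the add actually happened.  The
 * LR.AQRL/SC.AQRL pair is what provides the barrier on success mentioned
 * above.
 */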

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl %[p], %[c]\n\t"
		"beq %[p], %[u], 1f\n\t"
		"add %[rc], %[p], %[a]\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif

/*
 * These are the extra atomic operations constructed from one of the core
 * LR/SC-based operations above.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a barrier.  We just
 * use the other implementations directly.
 */
#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
	return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or); \
} \
static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg(n, &(v->counter), size, asm_or); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(c_or, asm_or) \
	ATOMIC_OP( int,   , c_or, 4, asm_or)
#else
#define ATOMIC_OPS(c_or, asm_or) \
	ATOMIC_OP( int,   , c_or, 4, asm_or) \
	ATOMIC_OP(long, 64, c_or, 8, asm_or)
#endif

ATOMIC_OPS( , .aqrl)
ATOMIC_OPS(_acquire, .aq)
ATOMIC_OPS(_release, .rl)
ATOMIC_OPS(_relaxed, )
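
/*
 * For example, the fully-ordered instantiation above makes
 * atomic_cmpxchg(v, o, n) expand to __cmpxchg(&v->counter, o, n, 4, .aqrl, .aqrl);
 * the other variants differ only in the operand size and the AQ/RL annotation
 * passed down.
 */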

#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.w.aqrl %[p], %[c]\n\t"
		"sub %[rc], %[p], %[o]\n\t"
		"bltz %[rc], 1f\n\t"
		"sc.w.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}
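/*
 * atomic_sub_if_positive() returns the would-be new value (old - offset):
 * if that value is non-negative the counter has been updated to it,
 * otherwise the counter was left untouched.
 */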

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:\n\t"
		"lr.d.aqrl %[p], %[c]\n\t"
		"sub %[rc], %[p], %[o]\n\t"
		"bltz %[rc], 1f\n\t"
		"sc.d.aqrl %[rc], %[rc], %[c]\n\t"
		"bnez %[rc], 0b\n\t"
		"1:"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */