/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}
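
/*
 * Plain-C sketch (illustrative only, ignoring atomicity) of what the
 * LLOCK/SCOND sequence above computes; "scond_failed" stands in for the
 * status flag that SCOND sets when another CPU wrote the location:
 *
 *	retry:
 *		prev = *(unsigned long *)ptr;		// llock
 *		if (prev == expected) {			// brne falls through
 *			*(unsigned long *)ptr = new;	// scond (may fail)
 *			if (scond_failed)		// bnz
 *				goto retry;
 *		}
 */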

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

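/*
 * Rough sketch of the EZNPS sequence above (illustrative; the exact
 * semantics of the custom CTOP_INST_EXC_DI_R2_R2_R3 opcode are inferred
 * from its use here, not from documentation):
 *
 *	r2 = new; r3 = ptr;
 *	// exchange-conditional: if (*r3 == CTOP_AUX_GPA1) swap r2 <-> *r3
 *	// r2 ends up holding the old memory value either way
 *	new = r2;	// so "new" is returned as the prev value
 */
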
#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})
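
/*
 * Usage sketch (illustrative only, not part of the original header):
 * a lock-free increment built on cmpxchg(); "cnt" is a hypothetical
 * word-sized variable. cmpxchg() returns the value it found at the
 * location, so the loop retries until no other CPU raced with us:
 *
 *	int old;
 *	do {
 *		old = cnt;
 *	} while (cmpxchg(&cnt, old, old + 1) != old);
 */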

/*
 * atomic_cmpxchg is the same as cmpxchg:
 * LLSC: only differs in data-type, semantics are exactly the same
 * !LLSC: cmpxchg() has to use an external lock @atomic_ops_lock to guarantee
 *        semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
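
/*
 * Usage sketch (illustrative only): claim a one-shot flag held in an
 * atomic_t; exactly one racing CPU sees the old value 0 and wins.
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&claimed, 0, 1) == 0)
 *		do_one_time_setup();	// hypothetical helper
 */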


#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}
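
/*
 * The EX instruction atomically swaps a register with a memory word.
 * As plain C (sketch, ignoring atomicity):
 *
 *	unsigned long tmp = *(unsigned long *)ptr;
 *	*(unsigned long *)ptr = val;
 *	val = tmp;	// old memory value is what gets returned
 */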

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to the ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
 * due to a subtle reason:
 * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a
 *   lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *   llist.h). Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore),
 * but we can cheat a bit: since cmpxchg()'s atomic_ops_lock() causes irqs to
 * be disabled, it can't possibly be interrupted/preempted/clobbered by xchg().
 * The other way around, xchg() is one instruction anyway, so it can't be
 * interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif

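/*
 * Usage sketch (illustrative only): atomically take over a single-slot
 * mailbox; "pending" and "process()" are hypothetical.
 *
 *	struct work *w = xchg(&pending, NULL);
 *	if (w)
 *		process(w);
 */
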
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ({					\
	(typeof(*(ptr)))__xchg((unsigned long)(with),		\
			       (ptr),				\
			       sizeof(*(ptr)));			\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *        is natively "SMP safe", no serialization required).
 *  UP  : other atomics disable IRQ, so no way an atomic_xchg() from a
 *        different context could clobber them. atomic_xchg() itself would
 *        be 1 insn, so it can't be clobbered by others. Thus no
 *        serialization required when atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#endif