/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 * linux/include/asm-m32r/spinlock.h
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

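/*
 * Rough usage sketch (the spin_lock() family itself lives in the generic
 * layer, not in this header): on an SMP m32r build, spin_lock(),
 * spin_trylock() and spin_unlock() in include/linux/spinlock.h eventually
 * expand to the arch_spin_*() operations below.  The lock word encoding
 * used throughout is:
 *
 *	lock->slock == 1	unlocked
 *	lock->slock <= 0	locked
 *
 * so a static lock starts life as { 1 } (__ARCH_SPIN_LOCK_UNLOCKED in
 * asm/spinlock_types.h).
 */
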
/**
 * arch_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}

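/*
 * A note on the pattern above (the instruction-level reading here is a
 * best-effort gloss of the M32R ISA, not quoted from the manual):
 *
 *   - "lock %0, @%3" loads the lock word and asserts the bus lock;
 *     "unlock %1, @%3" stores and releases it, so the pair forms one
 *     atomic read-modify-write.  In C terms the trylock is roughly
 *     oldval = xchg(&lock->slock, 0).
 *   - "mvfc"/"mvtc" save and restore PSW, and "clrpsw #0x40" masks
 *     interrupts, so the sequence cannot be interrupted locally.
 *   - DCACHE_CLEAR() is a workaround for the M32700 TS1 erratum and
 *     needs r6 as scratch, hence the conditional clobber.
 */
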
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

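/*
 * Slow-path note: the LOCK_SECTION_START/END block moves the contended
 * spin loop out of line.  The fast path atomically decrements slock and
 * falls through when the result is exactly 0 (it was 1, i.e. free).  A
 * negative result branches to label 2, which spins on plain "ld" reads
 * (no bus locking while merely waiting) and only retries the atomic
 * decrement at label 1 once the word turns positive again.
 */
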
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	mb();
	lock->slock = 1;
}

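/*
 * Unlock needs no atomic sequence: only the current owner ever stores 1,
 * so a plain write is enough.  The mb() keeps the critical section's
 * memory accesses from being reordered past the releasing store.
 */
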
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks: any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, read-write locks are implemented as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

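/*
 * Counter sketch (assuming RW_LOCK_BIAS is 0x01000000, as defined in
 * asm/spinlock_types.h):
 *
 *	rw->lock == RW_LOCK_BIAS	unlocked
 *	rw->lock == RW_LOCK_BIAS - N	held by N readers
 *	rw->lock == 0			held by one writer
 *
 * Readers subtract 1, writers subtract the whole bias, and either side
 * adds its subtraction back if it finds the lock contended.
 */
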
/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

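/*
 * Back-off note: if the decrement at label 1 goes negative, a writer
 * holds (or is taking) the lock, so the out-of-line path at label 2
 * first undoes the decrement with an atomic +1, then spins on plain
 * loads at label 3 until the counter is positive before retrying.
 */
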
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"sub	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		"bnez	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%3;		\n\t"
		"beq	%0, %1, 1b;		\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

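/*
 * The seth/or3 pair materializes the 32-bit RW_LOCK_BIAS constant in a
 * register, building it from its #high and #low 16-bit halves.  The
 * writer succeeds only when subtracting the bias leaves exactly 0, i.e.
 * no reader or writer was holding the lock.  Note this path uses r7
 * (not r6) as the TS1 scratch register, hence the different clobber.
 */
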
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

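/* Read unlock simply reverses read_lock's decrement with an atomic +1. */
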
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

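/*
 * Write unlock atomically adds the full bias back, taking the counter
 * from 0 to RW_LOCK_BIAS in one step so readers and writers never see a
 * half-released lock.
 */
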
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

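/*
 * The trylock variants lean on the generic atomic_t helpers instead of
 * hand-written assembly: the rwlock word is aliased as an atomic_t and
 * the same decrement-then-undo dance as read_lock is performed, just
 * without spinning.  atomic_dec_return() >= 0 means a reader slot was
 * free.
 */
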
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

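/*
 * atomic_sub_and_test() returns true iff the subtraction leaves 0, i.e.
 * the counter held exactly RW_LOCK_BIAS (fully unlocked); otherwise the
 * bias is added straight back and the trylock reports failure.
 */
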
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */