/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

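	/*
	 * Roughly:
	 *	while (lock->slock == LOCKED)
	 *		;
	 *	lock->slock = LOCKED;
	 *
	 * done atomically: SCOND only succeeds if nothing else wrote the
	 * location since our LLOCK, otherwise the BNZ retries from 1:
	 */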
	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

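	/*
	 * Roughly:
	 *	if (lock->slock != LOCKED) {
	 *		lock->slock = LOCKED;
	 *		got_it = 1;
	 *	}
	 * with the LLOCK/SCOND pair retrying only on SCOND failure (i.e. a
	 * racing update), not when the lock is simply found taken.
	 */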
	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

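	/*
	 * Same policy as arch_read_lock(), but bail out instead of spinning
	 * if a writer already holds the lock:
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		got_it = 1;
	 *	}
	 */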
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

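	/*
	 * Same policy as arch_write_lock(), but bail out instead of spinning
	 * if the lock is not completely unlocked:
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;	   (i.e. WR_LOCKED)
	 *		got_it = 1;
	 *	}
	 */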
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

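	/*
	 * EX atomically swaps @val with *slock: keep swapping LOCKED in
	 * until the value we get back is no longer LOCKED, i.e. a classic
	 * test-and-set spin.
	 */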
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus we need the full all-all barrier
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

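	/*
	 * Single atomic swap: LOCKED is unconditionally written to *slock;
	 * the attempt succeeded iff the old value read back was UNLOCKED.
	 */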
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

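	/*
	 * Note: the release is also done with EX (atomic swap) rather than a
	 * plain store; the old value read back is simply discarded.
	 */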
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by reader(s).
 *
 * The rwlock state itself is kept in @counter; access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */