ARC: spinlock: Document the EX based spin_unlock
arch/arc/include/asm/spinlock.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}
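
/*
 * C-level sketch of the llock/scond loop above (illustration only, not a
 * drop-in replacement for the inline asm):
 *
 *	do {
 *		val = load_linked(&lock->slock);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__ ||
 *		 !store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 *
 * load_linked()/store_conditional() are hypothetical helpers standing in
 * for llock/scond; scond succeeds only if no other core wrote the word
 * since the paired llock, which is what makes the acquire atomic.
 */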

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
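
/*
 * Illustrative summary of the @counter encoding, as can be read off the
 * accessors below:
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	counter == 0				held by a writer
 *	anything in between			held by readers, each having
 *						decremented it by one
 */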

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
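
/*
 * C-level sketch of the EX based acquire above (illustration only): EX
 * atomically swaps a register with memory, so the loop is roughly
 *
 *	do {
 *		val = atomic_swap(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 *
 * i.e. keep swapping LOCKED in until the old value read back is UNLOCKED.
 * atomic_swap() is a hypothetical helper naming the EX semantics, not an
 * actual kernel API.
 */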

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
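	/*
	 * For illustration, the plain STore based unlock alluded to above
	 * would mirror the LLSC arch_spin_unlock() earlier in this file:
	 *
	 *	smp_mb();
	 *	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
	 *	smp_mb();
	 *
	 * The EX below is kept instead, for the SystemC platform reason above.
	 */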
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
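
/*
 * For illustration, the backing type (declared in asm/spinlock_types.h)
 * roughly pairs the two fields named above for the !LLSC case:
 *
 *	typedef struct {
 *		volatile unsigned int	counter;
 *		arch_spinlock_t		lock_mutex;
 *	} arch_rwlock_t;
 *
 * (sketch only; refer to spinlock_types.h for the authoritative layout)
 */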

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */