/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}

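/*
 * Editorial note (not part of the original file): smp_cond_load_acquire()
 * above spins until the lock word reads 0 (__ARCH_SPIN_LOCK_UNLOCKED__ on
 * ARC) and then enforces ACQUIRE ordering; conceptually something like the
 * sketch below (the exact expansion depends on the kernel version):
 *
 *	while (READ_ONCE(lock->slock))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 */
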
#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	: [val] "=&r" (val)
	: [slock] "r" (&(lock->slock)),
	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();	/* ACQUIRE: see barrier comment in the !LLSC variant below */
}

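/*
 * Editorial sketch (not part of the original file): the LLOCK/SCOND pair
 * above is a load-locked / store-conditional retry loop. In C-like
 * pseudocode, with store_conditional() as an illustrative stand-in for
 * SCOND (fails if anyone wrote the lock word since our LLOCK):
 *
 *	do {
 *		while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *			;				// spin while LOCKED
 *	} while (!store_conditional(&lock->slock,
 *				    __ARCH_SPIN_LOCK_LOCKED__));
 *	smp_mb();					// ACQUIRE
 */
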
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [slock] "r" (&(lock->slock)),
	  [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
	return got_it;
}

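/*
 * Editorial sketch (not part of the original file): trylock is the same
 * sequence as arch_spin_lock() but bails to label 4 instead of spinning;
 * roughly (store_conditional() again stands in for SCOND):
 *
 *	retry:
 *		if (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *			return 0;			// already LOCKED, bail
 *		if (!store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__))
 *			goto retry;			// SCOND lost the race
 *		smp_mb();
 *		return 1;				// got_it
 */
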
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();	/* RELEASE */
	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

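/*
 * Editorial note (not part of the original file): the rwlock state is a
 * single counter. Assuming the ARC convention that __ARCH_RW_LOCK_UNLOCKED__
 * is the maximum reader count and 0 means write-locked, the encoding is:
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		: free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		: held by readers
 *	counter == 0					: held by a writer
 *
 * Readers decrement the counter; a writer swings it straight to 0.
 */
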
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader, i.e.
	 *	if (rw->counter > 0) { rw->counter--; ret = 1; }
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers), i.e.
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { rw->counter = 0; ret = 1; }
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	: [val] "=&r" (val),
	  [got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	  [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED] "ir" (0)
	: "memory", "cc");

	smp_mb();
	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();	/* RELEASE */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"	/* rw->counter++ */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter))
	: "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();	/* RELEASE */
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here
	 */
	smp_mb();
}

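/*
 * Editorial sketch (not part of the original file): EX atomically swaps a
 * register with memory, so the loop above behaves roughly like:
 *
 *	val = __ARCH_SPIN_LOCK_LOCKED__;
 *	do {
 *		atomic_swap(&val, &lock->slock);	// EX: atomic exchange
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);	// old value LOCKED => retry
 *
 * i.e. keep writing LOCKED until the value read back was UNLOCKED;
 * atomic_swap() here is only an illustrative name for the EX instruction.
 */
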
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");
	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option
	 */
	smp_mb();

	/*
	 * EX is not really required here, a simple STore of 0 suffices.
	 * However this causes tasklist livelocks in SystemC based SMP virtual
	 * platforms where the systemc core scheduler uses EX as a cue for
	 * moving to next core. Do a git log of this file for details
	 */
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

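/*
 * Editorial note (not part of the original file): every rwlock operation
 * below follows the same template, sketched here for orientation:
 *
 *	local_irq_save(flags);			// keep irqs off on this CPU
 *	arch_spin_lock(&rw->lock_mutex);	// serialize access to counter
 *	... inspect / update rw->counter ...
 *	arch_spin_unlock(&rw->lock_mutex);
 *	local_irq_restore(flags);
 */
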
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */