ARC: LLOCK/SCOND based spin_lock
arch/arc/include/asm/spinlock.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

#ifdef CONFIG_ARC_HAS_LLSC

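/*
 * LLOCK/SCOND are ARC's load-locked/store-conditional pair: LLOCK reads
 * the lock word and opens an exclusive reservation on it, and SCOND
 * writes back only if that reservation is still intact (no other core
 * wrote the word in between), failing otherwise so that the BNZ below
 * retries the whole sequence.
 */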
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

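/*
 * On the LL/SC path, unlock is a plain store of UNLOCKED: a core spinning
 * in the LLOCK/SCOND loop above simply observes the new value on its next
 * LLOCK. The smp_mb() ahead of the store provides the RELEASE ordering;
 * the trailing one mirrors the (technically superfluous) extra barrier
 * discussed in the !LLSC variants below.
 */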
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

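/*
 * Without LLOCK/SCOND, the fallback is ARC's EX instruction, which
 * atomically exchanges a register with a memory location. Acquire keeps
 * swapping LOCKED into the lock word until the value swapped out is no
 * longer LOCKED; unlock swaps UNLOCKED back in. A rough C sketch of the
 * acquire loop (illustration only, exchange() is just a stand-in for the
 * atomic swap that EX performs):
 *
 *	do {
 *		val = exchange(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */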
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]	\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

#endif

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
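/*
 * @counter starts at __ARCH_RW_LOCK_UNLOCKED__ (the maximum number of
 * concurrent readers): each reader entering decrements it and each reader
 * leaving increments it, while a writer needs the full unlocked value and
 * parks the counter at 0. Illustration (assuming, purely for the example,
 * that __ARCH_RW_LOCK_UNLOCKED__ is a large positive constant such as
 * 0x01000000): with two readers in, counter reads 0x00fffffe, so
 * arch_write_trylock() fails; once both readers drop out it is back at
 * 0x01000000 and a writer may claim it by setting counter to 0.
 */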

/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subsequent reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise, if unlocked, grant to writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

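/*
 * The arch_* hooks above are not meant to be called directly; they back
 * the generic spin_lock()/read_lock()/write_lock() API on SMP builds.
 * A sketch of typical usage (the lock and counter names below are made
 * up for illustration):
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long nr_events;
 *
 *	spin_lock(&stats_lock);		eventually reaches arch_spin_lock()
 *	nr_events++;
 *	spin_unlock(&stats_lock);	eventually reaches arch_spin_unlock()
 */
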
#endif /* __ASM_SPINLOCK_H */