/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
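
/*
 * For orientation, a rough C-level sketch of the ticket-lock protocol that
 * the assembly below implements (illustrative only; it assumes the 16-bit
 * owner/next fields from asm/spinlock_types.h and omits the WFE/SEVL event
 * machinery used for low-power waiting):
 *
 *	lock:	ticket = lock->next++;			(one atomic RMW)
 *		while (READ_ONCE(lock->owner) != ticket)
 *			cpu_relax();
 *	unlock:	smp_store_release(&lock->owner, ticket + 1);
 *
 * The "next" counter lives in the upper halfword of the 32-bit lock word
 * (hence the 1 << TICKET_SHIFT increment) and the lock is free when
 * owner == next.
 */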
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;
	u32 owner;

	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();
	owner = READ_ONCE(lock->owner) << 16;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
	/* Is the lock free? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/* Lock taken -- has there been a subsequent unlock->lock transition? */
"	eor	%w1, %w3, %w0, lsl #16\n"
"	cbz	%w1, 1b\n"
	/*
	 * The owner has been updated, so there was an unlock->lock
	 * transition that we missed. That means we can rely on the
	 * store-release of the unlock operation paired with the
	 * load-acquire of the lock operation to publish any of our
	 * previous stores to the new lock owner and therefore don't
	 * need to bother with the writeback below.
	 */
"	b	4f\n"
"3:\n"
	/*
	 * Serialise against any concurrent lockers by writing back the
	 * unlocked lock value.
	 */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
	__nops(2),
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
	/* Somebody else wrote to the lock; go back to 2 and reload the value */
"	cbnz	%w1, 2b\n"
"4:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "r" (owner)
	: "memory");
}

#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb(); /* See arch_spin_unlock_wait */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
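
/*
 * Example: with one CPU holding the lock and two more spinning, "next" has
 * been advanced three tickets past "owner", so next - owner == 3 and the
 * lock is reported as contended; next - owner == 1 means held but
 * uncontended, and 0 means free.
 */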

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
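
/*
 * A rough C equivalent of these operations (illustrative only; the real code
 * below waits with WFE and uses CASA when LSE atomics are available):
 *
 *	write_lock:	while (cmpxchg_acquire(&rw->lock, 0, 0x80000000) != 0)
 *				cpu_relax();
 *	write_trylock:	return cmpxchg_acquire(&rw->lock, 0, 0x80000000) == 0;
 *	write_unlock:	smp_store_release(&rw->lock, 0);
 */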

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
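
/*
 * A rough C equivalent (illustrative only; the real code below retries with
 * LDAXR/STXR or CASA and waits with WFE):
 *
 *	read_lock:	do {
 *				old = READ_ONCE(rw->lock);
 *			} while ((int)old < 0 ||
 *				 cmpxchg_acquire(&rw->lock, old, old + 1) != old);
 *	read_trylock:	a single attempt of the above, failing if old is
 *			negative (a writer holds bit 31) or another CPU wins
 *			the update;
 *	read_unlock:	atomic decrement of rw->lock with release semantics.
 */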
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	__nops(1)
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()	smp_mb()
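
/*
 * Hypothetical usage sketch (the names below are illustrative, not taken
 * from this file): a store made before taking a lock is not ordered against
 * loads performed inside the critical section unless a full barrier is added.
 *
 *	WRITE_ONCE(cond, 1);
 *	smp_mb__before_spinlock();
 *	spin_lock(&wq->lock);
 *	if (READ_ONCE(waiting))
 *		wake_up_process(waiter);
 *	spin_unlock(&wq->lock);
 *
 * Without the barrier, the load of "waiting" could be satisfied before the
 * store to "cond" is visible to other CPUs, which is the try_to_wake_up()
 * class of problem mentioned above.
 */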

#endif	/* __ASM_SPINLOCK_H */