#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
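/*
 * For example, on a 64-bit kernel the token written by CPU 3 while it
 * holds a lock is 0x80000003; an unlocked lock always reads as 0.
 */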
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
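/*
 * Note: paca->io_sync is set by the MMIO accessors (see asm/io.h), so
 * SYNC_IO in arch_spin_unlock() issues a full sync only when device I/O
 * was actually done inside the critical section; CLEAR_IO_SYNC in the
 * lock paths resets the flag when the lock is taken.
 */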
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
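/*
 * The yield itself lives in arch/powerpc/lib/locks.c.  Roughly (the exact
 * code there may differ), __spin_yield() does:
 *
 *	lock_value = lock->slock;
 *	if (lock_value == 0)
 *		return;
 *	holder_cpu = lock_value & 0xffff;
 *	yield_count = lppaca_of(holder_cpu).yield_count;
 *	if ((yield_count & 1) == 0)
 *		return;			(holder is currently running)
 *	if (lock->slock != lock_value)
 *		return;			(lock was released meanwhile)
 *	plpar_hcall_norets(H_CONFER, ..., yield_count);
 *
 * i.e. it confers the rest of our timeslice to the virtual processor
 * identified by the low bits of the lock word.
 */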
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
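/*
 * Note that the _flags variant restores the caller's interrupt state
 * (which may have interrupts enabled) while it busy-waits, and goes back
 * to the fully-disabled state saved in flags_dis before retrying the
 * trylock, so interrupts are not kept off for the whole spin.
 */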
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	arch_spinlock_t lock_val;

	smp_mb();

	/*
	 * Atomically load and store back the lock value (unchanged). This
	 * ensures that our observation of the lock value is ordered with
	 * respect to other lock operations.
	 */
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
"	stwcx. %0, 0, %2\n"
"	bne- 1b\n"
	: "=&r" (lock_val), "+m" (*lock)
	: "r" (lock)
	: "cr0", "xer");

	if (arch_spin_value_unlocked(lock_val))
		goto out;

	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

out:
	smp_mb();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
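/*
 * The lock word encodes the state: 0 means unlocked, a positive value is
 * the number of readers, and a negative value (the 0x800000yy write token
 * on 64-bit, -1 on 32-bit) means a writer holds it, as the macros below
 * assume.
 */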
#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */