#ifndef ASM_X86__SPINLOCK_H
#define ASM_X86__SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
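
/*
 * As a rough illustration only (a sketch, not code the kernel uses), the
 * ticket lock described above behaves like the following C, where
 * fetch_and_add() is a hypothetical atomic helper and head/tail are the two
 * halves of the real lock word:
 *
 *	struct ticket_lock {
 *		unsigned char head;		// ticket currently being served
 *		unsigned char tail;		// next ticket to hand out
 *	};
 *
 *	static void ticket_lock(struct ticket_lock *t)
 *	{
 *		unsigned char me = fetch_and_add(&t->tail, 1);	// take a ticket
 *		while (ACCESS_ONCE(t->head) != me)		// wait to be served
 *			cpu_relax();
 *	}
 *
 *	static void ticket_unlock(struct ticket_lock *t)
 *	{
 *		t->head++;			// serve the next waiter, FIFO order
 *	}
 *
 * The real code below folds the fetch-and-add of the tail and the first read
 * of the head into a single locked xadd on the combined lock word.
 */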

#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) - tmp) & 0xff) > 1;
}
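
/*
 * Worked example (illustrative only): with NR_CPUS < 256 the lock word is
 * 16 bits wide, head in the low byte and tail in the high byte.
 *
 *	slock == 0x0000		head 0, tail 0		unlocked
 *	slock == 0x0101		head 1, tail 1		unlocked again
 *	slock == 0x0201		head 1, tail 2		locked, no waiters
 *	slock == 0x0401		head 1, tail 4		locked, two CPUs spinning
 *
 * So "locked" is simply head != tail, and "contended" is tail - head > 1,
 * which is exactly what the two helpers above compute.
 */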

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
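
/*
 * The trylock above is easier to follow as C (a sketch only; cmpxchg() is
 * shown loosely over the whole 16-bit lock word):
 *
 *	old = ACCESS_ONCE(lock->slock);
 *	if (((old >> 8) & 0xff) != (old & 0xff))
 *		return 0;			// head != tail: already held
 *	new = old + 0x0100;			// take the next ticket (tail++)
 *	return cmpxchg(&lock->slock, old, new) == old;
 *
 * The cmpxchg can still lose to another CPU that takes a ticket between the
 * load and the locked compare-exchange, in which case trylock reports failure.
 */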

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) - tmp) & 0xffff) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * Define a virtualization-friendly, old-style lock-byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};
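
/*
 * Sketch of the byte-lock algorithm in C-like pseudocode (illustration only;
 * the authoritative version is the asm in __byte_spin_lock() below):
 *
 *	while (xchg(&bl->lock, 1) != 0) {	// try to grab the lock byte
 *		bl->spinners++;			// locked incb: advertise waiter
 *		while (bl->lock == 1)
 *			cpu_relax();		// spin until the byte drops to 0
 *		bl->spinners--;			// locked decb, then retry xchg
 *	}
 *
 * Because the lock is taken with xchg rather than dec, an all-zero structure
 * is a valid unlocked lock, which is what makes zero-initialization work.
 */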

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    "   test %1,%1\n"
	    "   jz 3f\n"
	    "   " LOCK_PREFIX "incb %2\n"
	    "2: rep;nop\n"
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
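
/*
 * Worked example of the counter scheme (illustrative only), with
 * RW_LOCK_BIAS (0x01000000, from asm/rwlock.h) as the unlocked value:
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - 2	held by two readers
 *	lock == 0			held by one writer
 *	lock <  0			a contender is in the slow path
 *
 * A reader takes the lock by atomically subtracting 1 and checking that the
 * result stayed non-negative; a writer subtracts the whole bias and checks
 * for zero.  That is the fast path of __raw_read_lock()/__raw_write_lock()
 * below; the slow paths live out of line in __read_lock_failed and
 * __write_lock_failed.
 */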

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
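
/*
 * Illustration of the bias trick in __raw_write_trylock() above: on an
 * uncontended lock count == RW_LOCK_BIAS, so subtracting the bias yields 0
 * and atomic_sub_and_test() reports success.  If even a single reader holds
 * the lock, count == RW_LOCK_BIAS - 1, the subtraction yields -1, the test
 * fails, and the bias is added straight back so the lock value is left
 * exactly as it was found.
 */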

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* ASM_X86__SPINLOCK_H */