/*
 * include/asm-i386/spinlock.h - from Linux 2.6.12-rc2
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>
#include <linux/compiler.h>

asmlinkage int printk(const char *fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int slock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
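
/*
 * Illustrative usage sketch (not part of this header): generic code
 * reaches these primitives through the <linux/spinlock.h> wrappers,
 * which add preemption handling around the _raw_* operations defined
 * below.  "my_lock" is a made-up name for the example:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section, must not sleep ...
 *	spin_unlock(&my_lock);
 */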

/*
 * Simple spin lock operations.  There are two variants: one clears
 * IRQs on the local processor, one does not.
 *
 * We make no fairness guarantees - fairness would have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"
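
/*
 * Roughly, the sequence above does the following (a sketch only - the
 * real lock depends on "lock ; decb" being one atomic read-modify-write):
 *
 *	while (atomically decrementing slock takes it negative) {
 *		while ((signed char)lock->slock <= 0)
 *			cpu_relax();	... the "rep;nop" spin ...
 *		... then retry the atomic decrement ...
 *	}
 *
 * Waiters spin on plain reads and only retry the locked decrement once
 * the byte looks positive again, which avoids hammering the cache line
 * with locked cycles while the lock is held elsewhere.
 */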

#define spin_lock_string_flags \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 4f\n\t" \
	"2:\t" \
	"testl $0x200, %1\n\t" \
	"jz 3f\n\t" \
	"sti\n\t" \
	"3:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jle 3b\n\t" \
	"cli\n\t" \
	"jmp 1b\n" \
	"4:\n\t"
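
/*
 * Bit 9 (0x200) of EFLAGS is IF, the interrupt-enable flag.  If the
 * caller's saved flags (%1) had interrupts enabled, the wait loop above
 * re-enables interrupts ("sti") while spinning and disables them again
 * ("cli") before retrying the locked decrement, so a contended
 * spin_lock_irqsave() does not keep interrupts off for the entire wait.
 */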

/*
 * A plain byte store is enough to release the lock, despite all the
 * confusion around x86 store ordering - except on PPro SMP, or when
 * CONFIG_X86_OOSTORE is in use, where the unlocking store could become
 * visible before earlier stores in the critical section (PPro errata
 * 66, 92).  Those configurations use the locked xchgb variant below.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->slock) : : "memory"


static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else

#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->slock) \
		:"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif

static inline int _raw_spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}
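
/*
 * The xchgb above stores 0 ("locked") and returns the previous byte;
 * a positive old value means the lock was free and is now ours.  An
 * illustrative use via the generic wrapper ("my_lock" is made up):
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	}
 */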

static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("eip: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->slock) : : "memory");
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("eip: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string_flags
		:"=m" (lock->slock) : "r" (flags) : "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
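
/*
 * An illustrative sketch of that mixing ("my_rwlock" is a made-up
 * name): the writer disables interrupts, while readers - including
 * readers running in interrupt context - take the plain read lock:
 *
 *	unsigned long flags;
 *
 *	write_lock_irqsave(&my_rwlock, flags);
 *	... modify the data ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	read_lock(&my_rwlock);		... safe even from an interrupt ...
 *	... read the data ...
 *	read_unlock(&my_rwlock);
 */
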
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/**
 * read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious; it uses the same technique
 * as the rw semaphores.  See semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
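
/*
 * A sketch of the counter arithmetic (RW_LOCK_BIAS is defined in
 * <asm/rwlock.h>, as 0x01000000 on i386):
 *
 *	unlocked:	lock == RW_LOCK_BIAS
 *	n readers:	lock == RW_LOCK_BIAS - n	(still positive)
 *	writer held:	lock == 0
 *
 * A reader atomically subtracts 1 and owns the lock while the result
 * stays non-negative; a writer subtracts the whole bias and owns the
 * lock only if the result is exactly zero.  A writer that hits the
 * lock while readers hold it drives the count negative - that is the
 * "contended" sign bit described above.
 */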

static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

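/*
 * Note that the decrement and the check below are two separate atomic
 * operations, not one: a concurrent writer's transient subtraction of
 * RW_LOCK_BIAS can make the count look negative between our
 * atomic_dec() and atomic_read(), so this trylock may occasionally
 * fail even though a read lock was available.  Spurious failure is
 * acceptable for trylock semantics.
 */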
static inline int _raw_read_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif /* __ASM_SPINLOCK_H */