#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

8b059d23 AK |
20 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
21 | { | |
22 | return *(volatile signed int *)(&(lock)->slock) <= 0; | |
23 | } | |
1da177e4 | 24 | |
/*
 * Acquire the spinlock, spinning until it is available.  slock holds 1
 * when the lock is free; a locked atomic decrement that leaves a
 * non-negative value means we took it.  On contention we spin with a
 * plain read ("rep;nop" == PAUSE) until the lock looks free, then retry
 * the locked decrement.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"	/* atomically decrement slock */
		"jns 2f\n"			/* result >= 0: lock acquired */
		"3:\n"
		"rep;nop\n\t"			/* PAUSE while busy-waiting */
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"			/* still held (<= 0): keep spinning */
		"jmp 1b\n"			/* looks free: retry the acquire */
		"2:\t"
		: "+m" (lock->slock) : : "memory");
}

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway
 * so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
/*
 * Like __raw_spin_lock(), but if the caller's saved @flags show that
 * interrupts were enabled (EFLAGS.IF, bit 0x200, set), re-enable
 * interrupts while busy-waiting so pending interrupts can be serviced,
 * and disable them again before retrying the acquire.
 */
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
					 unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"	/* atomically decrement slock */
		"jns 5f\n"			/* result >= 0: lock acquired */
		"testl $0x200, %1\n\t"		/* interrupts were enabled in @flags? */
		"jz 4f\n\t"			/* no: spin with irqs left off */
		STI_STRING "\n"			/* re-enable irqs while waiting */
		"3:\t"
		"rep;nop\n\t"			/* PAUSE */
		"cmpl $0, %0\n\t"
		"jle 3b\n\t"			/* still held: keep spinning */
		CLI_STRING "\n\t"		/* disable irqs before retrying */
		"jmp 1b\n"
		"4:\t"				/* irqs-off wait loop */
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jg 1b\n\t"			/* looks free: retry the acquire */
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock)
		: "r" ((unsigned)flags) CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif

/*
 * Try once to acquire the lock without spinning.  Atomically exchange
 * slock with 0 (held); the lock was free iff the old value was
 * positive.  Returns 1 on success, 0 if the lock was already held.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	asm volatile(
		"xchgl %0,%1"		/* xchg is implicitly locked on x86 */
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	/*
	 * Mark the lock free again by storing 1; the "memory" clobber
	 * keeps the compiler from moving critical-section accesses past
	 * the release.
	 */
	asm volatile("movl $1,%0" : "=m" (lock->slock) :: "memory");
}

8b059d23 AK |
94 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
95 | { | |
96 | while (__raw_spin_is_locked(lock)) | |
97 | cpu_relax(); | |
98 | } | |

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

8b059d23 AK |
114 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) |
115 | { | |
116 | return (int)(lock)->lock > 0; | |
117 | } | |
118 | ||
119 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | |
120 | { | |
121 | return (lock)->lock == RW_LOCK_BIAS; | |
122 | } | |
fb1c8f93 IM |
123 | |
/*
 * Take a read lock: atomically subtract 1 from the counter.  If the
 * result went negative (sign/"contended" bit set), fall into the
 * out-of-line slow path, which takes the lock pointer in %rdi (the
 * "D" constraint).
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"			/* still non-negative: got it */
		     "call __read_lock_failed\n\t"	/* contended slow path */
		     "1:\n"
		     ::"D" (rw) : "memory");
}

/*
 * Take the write lock: atomically subtract the whole RW_LOCK_BIAS.
 * Only if the counter hits exactly zero (no readers, no writer) did
 * we get exclusive ownership; otherwise fall into the out-of-line
 * slow path, which takes the lock pointer in %rdi.
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"			/* hit zero: we own it */
		     "call __write_lock_failed\n\t"	/* contended slow path */
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}

fb1c8f93 | 142 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
1da177e4 LT |
143 | { |
144 | atomic_t *count = (atomic_t *)lock; | |
2fed0c50 | 145 | |
1da177e4 LT |
146 | atomic_dec(count); |
147 | if (atomic_read(count) >= 0) | |
148 | return 1; | |
149 | atomic_inc(count); | |
150 | return 0; | |
151 | } | |
152 | ||
fb1c8f93 | 153 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
1da177e4 LT |
154 | { |
155 | atomic_t *count = (atomic_t *)lock; | |
2fed0c50 | 156 | |
1da177e4 LT |
157 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
158 | return 1; | |
159 | atomic_add(RW_LOCK_BIAS, count); | |
160 | return 0; | |
161 | } | |
162 | ||
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	/* Drop our reader reference: atomically undo the subl $1 of
	 * __raw_read_lock(). */
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	/* Give back the full bias taken by __raw_write_lock(). */
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

/* Busy-wait hints used by the generic lock contention loops. */
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */