#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

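/*
 * For illustration, ALT_SMP("sev", "nop") expands to roughly:
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 *
 * The SMP instruction is assembled in place, while its address and the UP
 * replacement are recorded in .alt.smp.init so the boot-time SMP_ON_UP
 * fixup code can patch the instruction when running on a uniprocessor.
 */
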
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

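/*
 * dsb_sev() issues a full data synchronization barrier and then signals an
 * event, waking any cores parked in WFE.  The unlock paths call it so that
 * waiters spinning in WFE() re-examine the lock.  ARMv7 has a dedicated
 * "dsb" instruction; on ARMv6 the equivalent barrier is the CP15
 * c7, c10, 4 operation.
 */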
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

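/*
 * A rough C-level sketch of the acquire protocol in arch_spin_lock()
 * below, with load_exclusive()/store_exclusive() standing in for
 * ldrex/strex (store_exclusive() returns 0 on success):
 *
 *	do {
 *		tmp = load_exclusive(&lock->lock);
 *		if (tmp != 0)
 *			wfe();		// sleep until an unlocker does sev
 *		else
 *			tmp = store_exclusive(&lock->lock, 1);
 *	} while (tmp != 0);
 *	smp_mb();			// barrier before the critical section
 */
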
#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}
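
/*
 * Unlocking is a plain store: the owner holds the lock exclusively, so no
 * ldrex/strex retry is needed.  The smp_mb() beforehand keeps the critical
 * section from being reordered past the store, and dsb_sev() wakes any
 * CPUs waiting in WFE().
 */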

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
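
/*
 * Lock word layout used here: bit 31 is set while a writer holds the lock,
 * and bits 0-30 count the active readers.  Zero therefore means "free",
 * and any negative value means "write-locked".
 */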

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
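/*
 * Instruction by instruction, the loop in arch_read_lock() below works
 * out as (strexpl writes 0 to tmp2 on success, 1 on failure):
 *
 *	ldrex	%0, [%2]	tmp = rw->lock
 *	adds	%0, %0, #1	tmp++, setting N if a writer holds the lock
 *	strexpl	%1, %0, [%2]	if (tmp >= 0) tmp2 = try_store(tmp)
 *	WFE("mi")		if (tmp < 0) wait for an unlocker's sev
 *	rsbpls	%0, %1, #0	if (tmp >= 0) tmp = -tmp2, updating the flags
 *	bmi	1b		retry if write-locked or the strex failed
 */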
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	/* Only the last reader out needs to wake a writer waiting in WFE. */
	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;	/* 1 = failure unless strexpl succeeds */

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}
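
/*
 * Unlike the blocking variants, the trylock routines above make a single
 * ldrex/strex attempt and never execute WFE(): on contention, or if the
 * exclusive store fails, they simply return 0 and leave any retrying to
 * the caller.
 */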

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */