#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/page.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
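
/*
 * A free lock is encoded as slock == 1 and a held lock as slock <= 0
 * (see the per-function comments below).  The __raw_spin_unlock_wait()
 * macro below simply busy-waits, calling cpu_relax() between reads,
 * until the lock is observed to be free.
 */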
#define __raw_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define __raw_spin_lock_flags(lock, flags)	__raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

/**
 * __raw_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * __raw_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
	__asm__ __volatile__ (
		"# __raw_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
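
/*
 * All of the locked sequences in this file follow the same pattern:
 * the current PSW is saved with "mvfc", interrupts are disabled by
 * clearing the interrupt-enable bit with "clrpsw #0x40", the
 * "lock"/"unlock" instruction pair performs the atomic load and store
 * on the lock word, and "mvtc" restores the saved PSW.  In
 * __raw_spin_trylock() this amounts to an atomic exchange of
 * lock->slock with 0, with the old value returned in %0.
 * DCACHE_CLEAR() and the extra r6/r7 clobber appear to be a workaround
 * for early M32700 (TS1) silicon and appear to expand to nothing on
 * other chips.
 */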

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# __raw_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
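
/*
 * On contention the "bltz" above branches to the out-of-line wait loop
 * that LOCK_SECTION_START()/LOCK_SECTION_END place in a separate text
 * section: it re-reads lock->slock with a plain "ld" until the value
 * becomes positive (i.e. the lock looks free) and only then branches
 * back to "1:" to retry the atomic decrement.  This keeps the polling
 * loop off the fast path, and the wait loop itself uses only plain
 * loads rather than locked accesses.
 */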

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	mb();
	lock->slock = 1;
}
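
/*
 * Usage sketch (illustrative only, assuming the generic spinlock layer
 * of this kernel version): callers do not use these __raw_* operations
 * directly.  The architecture-independent spin_lock()/spin_unlock()
 * wrappers are expected to invoke them with preemption disabled,
 * roughly:
 *
 *	preempt_disable();
 *	__raw_spin_lock(&lock->raw_lock);
 *	... critical section ...
 *	__raw_spin_unlock(&lock->raw_lock);
 *	preempt_enable();
 */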

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
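
/*
 * Concretely, rw->lock starts at RW_LOCK_BIAS.  Each reader atomically
 * subtracts 1 and each writer subtracts the whole RW_LOCK_BIAS, so
 *
 *   lock == RW_LOCK_BIAS       : unlocked
 *   0 < lock < RW_LOCK_BIAS    : held by (RW_LOCK_BIAS - lock) readers
 *   lock == 0                  : held by a writer
 *   lock < 0                   : transient, while a contended locker
 *                                backs off and retries
 *
 * which is exactly what the can-lock tests below check.
 */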

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)	((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"sub	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		"bnez	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%3;		\n\t"
		"beq	%0, %1, 1b;		\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

#define __raw_read_trylock(lock)	generic__raw_read_trylock(lock)

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
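
/*
 * __raw_write_trylock() relies on atomic_sub_and_test() returning true
 * only when the subtraction leaves the counter at zero: that happens
 * only if rw->lock was exactly RW_LOCK_BIAS, i.e. the lock was free,
 * so the write lock has been taken.  Otherwise the bias is added back
 * and the attempt fails.  __raw_read_trylock() maps to the kernel's
 * generic generic__raw_read_trylock() helper via the #define above.
 */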

#endif	/* _ASM_M32R_SPINLOCK_H */