arch/mips/include/asm/spinlock.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

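/*
 * All of the locks below are built on MIPS ll/sc (load-linked /
 * store-conditional): ll reads the lock word and opens a link on it, and
 * sc writes back only if nothing disturbed the link, leaving 1 in the
 * source register on success and 0 on failure, in which case the
 * sequence is retried.  Each function comes in two flavours selected by
 * R10000_LLSC_WAR from <asm/war.h>: one retries with the branch-likely
 * beqzl (working around an ll/sc erratum on early R10000 silicon), and a
 * generic one that moves the spin loop into .subsection 2 so the
 * uncontended fast path stays short and inline.  smp_llsc_mb() after a
 * successful acquire and smp_mb() before a release provide the ordering
 * that ll/sc alone does not guarantee on weakly ordered cores.
 */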
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 li	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();
}

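/*
 * The release side needs no ll/sc: the lock holder owns the word
 * exclusively, so a plain store of zero suffices; the smp_mb() ahead of
 * it keeps the critical section's accesses from drifting past the
 * unlock.
 */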
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 andi	%2, %0, 1				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();

	return res == 0;
}
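/*
 * Above, res captures bit 0 of the lock word as it was before the
 * ori/sc pair, so res == 0 means the lock was observed free and the sc
 * that set it succeeded.  An illustrative caller, a sketch only with
 * hypothetical names, not part of this header:
 *
 *	if (__raw_spin_trylock(&lock))
 *		enter_critical_section();
 *	else
 *		do_something_else();
 */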

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
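/*
 * A sketch of that mixing (the generic read_lock()/write_lock_irqsave()
 * wrappers are layered over the __raw_* primitives in this file):
 *
 *	reader, may run in interrupt context:
 *		read_lock(&rw);			no need to disable irqs;
 *		...				interrupt readers nest fine
 *		read_unlock(&rw);
 *
 *	writer, process context only:
 *		write_lock_irqsave(&rw, flags);	irqs off, so an interrupt
 *		...				reader cannot deadlock on us
 *		write_unlock_irqrestore(&rw, flags);
 */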

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu: sub will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
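/* Unlike __raw_spin_unlock(), releasing a read lock needs ll/sc: the
   lock word is a shared reader count that other CPUs may be updating
   concurrently, so the decrement itself must be atomic. */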
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}
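/*
 * A writer holds the lock by setting the sign bit (lui 0x8000 yields
 * 0x80000000), which is why the reader paths above spin on bltz: any
 * negative lock word means a writer holds the lock.
 */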

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */