/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
11
1da177e4
LT
12#include <linux/linkage.h>
13#include <linux/preempt.h>
14#include <linux/spinlock.h>
15#include <linux/interrupt.h>
8a25d5de 16#include <linux/debug_locks.h>
1da177e4
LT
17#include <linux/module.h>
18
19/*
20 * Generic declaration of the raw read_trylock() function,
21 * architectures are supposed to optimize this:
22 */
fb1c8f93 23int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
1da177e4 24{
fb1c8f93 25 __raw_read_lock(lock);
1da177e4
LT
26 return 1;
27}
fb1c8f93 28EXPORT_SYMBOL(generic__raw_read_trylock);
1da177e4
LT
29
30int __lockfunc _spin_trylock(spinlock_t *lock)
31{
32 preempt_disable();
8a25d5de
IM
33 if (_raw_spin_trylock(lock)) {
34 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1da177e4 35 return 1;
8a25d5de 36 }
1da177e4
LT
37
38 preempt_enable();
39 return 0;
40}
41EXPORT_SYMBOL(_spin_trylock);
42
43int __lockfunc _read_trylock(rwlock_t *lock)
44{
45 preempt_disable();
8a25d5de
IM
46 if (_raw_read_trylock(lock)) {
47 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
1da177e4 48 return 1;
8a25d5de 49 }
1da177e4
LT
50
51 preempt_enable();
52 return 0;
53}
54EXPORT_SYMBOL(_read_trylock);
55
56int __lockfunc _write_trylock(rwlock_t *lock)
57{
58 preempt_disable();
8a25d5de
IM
59 if (_raw_write_trylock(lock)) {
60 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1da177e4 61 return 1;
8a25d5de 62 }
1da177e4
LT
63
64 preempt_enable();
65 return 0;
66}
67EXPORT_SYMBOL(_write_trylock);
68
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
	defined(CONFIG_DEBUG_LOCK_ALLOC)
77void __lockfunc _read_lock(rwlock_t *lock)
78{
79 preempt_disable();
8a25d5de 80 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
81 _raw_read_lock(lock);
82}
83EXPORT_SYMBOL(_read_lock);
84
85unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
86{
87 unsigned long flags;
88
89 local_irq_save(flags);
90 preempt_disable();
8a25d5de
IM
91 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
92 /*
93 * On lockdep we dont want the hand-coded irq-enable of
94 * _raw_spin_lock_flags() code, because lockdep assumes
95 * that interrupts are not re-enabled during lock-acquire:
96 */
97#ifdef CONFIG_PROVE_LOCKING
98 _raw_spin_lock(lock);
99#else
fb1c8f93 100 _raw_spin_lock_flags(lock, &flags);
8a25d5de 101#endif
1da177e4
LT
102 return flags;
103}
104EXPORT_SYMBOL(_spin_lock_irqsave);
105
106void __lockfunc _spin_lock_irq(spinlock_t *lock)
107{
108 local_irq_disable();
109 preempt_disable();
8a25d5de 110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
111 _raw_spin_lock(lock);
112}
113EXPORT_SYMBOL(_spin_lock_irq);
114
115void __lockfunc _spin_lock_bh(spinlock_t *lock)
116{
117 local_bh_disable();
118 preempt_disable();
8a25d5de 119 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
120 _raw_spin_lock(lock);
121}
122EXPORT_SYMBOL(_spin_lock_bh);
123
124unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
125{
126 unsigned long flags;
127
128 local_irq_save(flags);
129 preempt_disable();
8a25d5de 130 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
131 _raw_read_lock(lock);
132 return flags;
133}
134EXPORT_SYMBOL(_read_lock_irqsave);
135
136void __lockfunc _read_lock_irq(rwlock_t *lock)
137{
138 local_irq_disable();
139 preempt_disable();
8a25d5de 140 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
141 _raw_read_lock(lock);
142}
143EXPORT_SYMBOL(_read_lock_irq);
144
145void __lockfunc _read_lock_bh(rwlock_t *lock)
146{
147 local_bh_disable();
148 preempt_disable();
8a25d5de 149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
150 _raw_read_lock(lock);
151}
152EXPORT_SYMBOL(_read_lock_bh);
153
154unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
155{
156 unsigned long flags;
157
158 local_irq_save(flags);
159 preempt_disable();
8a25d5de 160 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
161 _raw_write_lock(lock);
162 return flags;
163}
164EXPORT_SYMBOL(_write_lock_irqsave);
165
166void __lockfunc _write_lock_irq(rwlock_t *lock)
167{
168 local_irq_disable();
169 preempt_disable();
8a25d5de 170 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
171 _raw_write_lock(lock);
172}
173EXPORT_SYMBOL(_write_lock_irq);
174
175void __lockfunc _write_lock_bh(rwlock_t *lock)
176{
177 local_bh_disable();
178 preempt_disable();
8a25d5de 179 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
180 _raw_write_lock(lock);
181}
182EXPORT_SYMBOL(_write_lock_bh);
183
184void __lockfunc _spin_lock(spinlock_t *lock)
185{
186 preempt_disable();
8a25d5de 187 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
188 _raw_spin_lock(lock);
189}
190
191EXPORT_SYMBOL(_spin_lock);
192
193void __lockfunc _write_lock(rwlock_t *lock)
194{
195 preempt_disable();
8a25d5de 196 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1da177e4
LT
197 _raw_write_lock(lock);
198}
199
200EXPORT_SYMBOL(_write_lock);
201
#else /* CONFIG_PREEMPT: */
204/*
205 * This could be a long-held lock. We both prepare to spin for a long
206 * time (making _this_ CPU preemptable if possible), and we also signal
207 * towards that other CPU that it should break the lock ASAP.
208 *
209 * (We do this in a function because inlining it would be excessive.)
210 */
211
212#define BUILD_LOCK_OPS(op, locktype) \
213void __lockfunc _##op##_lock(locktype##_t *lock) \
214{ \
1da177e4 215 for (;;) { \
ee25e96f 216 preempt_disable(); \
1da177e4
LT
217 if (likely(_raw_##op##_trylock(lock))) \
218 break; \
219 preempt_enable(); \
ee25e96f 220 \
1da177e4
LT
221 if (!(lock)->break_lock) \
222 (lock)->break_lock = 1; \
223 while (!op##_can_lock(lock) && (lock)->break_lock) \
224 cpu_relax(); \
1da177e4
LT
225 } \
226 (lock)->break_lock = 0; \
227} \
228 \
229EXPORT_SYMBOL(_##op##_lock); \
230 \
231unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
232{ \
233 unsigned long flags; \
234 \
1da177e4 235 for (;;) { \
ee25e96f 236 preempt_disable(); \
1da177e4
LT
237 local_irq_save(flags); \
238 if (likely(_raw_##op##_trylock(lock))) \
239 break; \
240 local_irq_restore(flags); \
1da177e4 241 preempt_enable(); \
ee25e96f 242 \
1da177e4
LT
243 if (!(lock)->break_lock) \
244 (lock)->break_lock = 1; \
245 while (!op##_can_lock(lock) && (lock)->break_lock) \
246 cpu_relax(); \
1da177e4
LT
247 } \
248 (lock)->break_lock = 0; \
249 return flags; \
250} \
251 \
252EXPORT_SYMBOL(_##op##_lock_irqsave); \
253 \
254void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
255{ \
256 _##op##_lock_irqsave(lock); \
257} \
258 \
259EXPORT_SYMBOL(_##op##_lock_irq); \
260 \
261void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
262{ \
263 unsigned long flags; \
264 \
265 /* */ \
266 /* Careful: we must exclude softirqs too, hence the */ \
267 /* irq-disabling. We use the generic preemption-aware */ \
268 /* function: */ \
269 /**/ \
270 flags = _##op##_lock_irqsave(lock); \
271 local_bh_disable(); \
272 local_irq_restore(flags); \
273} \
274 \
275EXPORT_SYMBOL(_##op##_lock_bh)
276
277/*
278 * Build preemption-friendly versions of the following
279 * lock-spinning functions:
280 *
281 * _[spin|read|write]_lock()
282 * _[spin|read|write]_lock_irq()
283 * _[spin|read|write]_lock_irqsave()
284 * _[spin|read|write]_lock_bh()
285 */
286BUILD_LOCK_OPS(spin, spinlock);
287BUILD_LOCK_OPS(read, rwlock);
288BUILD_LOCK_OPS(write, rwlock);
#endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Like _spin_lock(), but tells lockdep which @subclass this nesting
 * level belongs to, so legitimate nested locking of same-class locks
 * is not reported as a deadlock.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock_nested);

#endif
1da177e4
LT
305void __lockfunc _spin_unlock(spinlock_t *lock)
306{
8a25d5de 307 spin_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
308 _raw_spin_unlock(lock);
309 preempt_enable();
310}
311EXPORT_SYMBOL(_spin_unlock);
312
313void __lockfunc _write_unlock(rwlock_t *lock)
314{
8a25d5de 315 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
316 _raw_write_unlock(lock);
317 preempt_enable();
318}
319EXPORT_SYMBOL(_write_unlock);
320
321void __lockfunc _read_unlock(rwlock_t *lock)
322{
8a25d5de 323 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
324 _raw_read_unlock(lock);
325 preempt_enable();
326}
327EXPORT_SYMBOL(_read_unlock);
328
329void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
330{
8a25d5de 331 spin_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
332 _raw_spin_unlock(lock);
333 local_irq_restore(flags);
334 preempt_enable();
335}
336EXPORT_SYMBOL(_spin_unlock_irqrestore);
337
338void __lockfunc _spin_unlock_irq(spinlock_t *lock)
339{
8a25d5de 340 spin_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
341 _raw_spin_unlock(lock);
342 local_irq_enable();
343 preempt_enable();
344}
345EXPORT_SYMBOL(_spin_unlock_irq);
346
347void __lockfunc _spin_unlock_bh(spinlock_t *lock)
348{
8a25d5de 349 spin_release(&lock->dep_map, 1, _RET_IP_);
1da177e4 350 _raw_spin_unlock(lock);
10f02d1c 351 preempt_enable_no_resched();
8a25d5de 352 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
1da177e4
LT
353}
354EXPORT_SYMBOL(_spin_unlock_bh);
355
356void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
357{
8a25d5de 358 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
359 _raw_read_unlock(lock);
360 local_irq_restore(flags);
361 preempt_enable();
362}
363EXPORT_SYMBOL(_read_unlock_irqrestore);
364
365void __lockfunc _read_unlock_irq(rwlock_t *lock)
366{
8a25d5de 367 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
368 _raw_read_unlock(lock);
369 local_irq_enable();
370 preempt_enable();
371}
372EXPORT_SYMBOL(_read_unlock_irq);
373
374void __lockfunc _read_unlock_bh(rwlock_t *lock)
375{
8a25d5de 376 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4 377 _raw_read_unlock(lock);
10f02d1c 378 preempt_enable_no_resched();
8a25d5de 379 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
1da177e4
LT
380}
381EXPORT_SYMBOL(_read_unlock_bh);
382
383void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
384{
8a25d5de 385 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
386 _raw_write_unlock(lock);
387 local_irq_restore(flags);
388 preempt_enable();
389}
390EXPORT_SYMBOL(_write_unlock_irqrestore);
391
392void __lockfunc _write_unlock_irq(rwlock_t *lock)
393{
8a25d5de 394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4
LT
395 _raw_write_unlock(lock);
396 local_irq_enable();
397 preempt_enable();
398}
399EXPORT_SYMBOL(_write_unlock_irq);
400
401void __lockfunc _write_unlock_bh(rwlock_t *lock)
402{
8a25d5de 403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
1da177e4 404 _raw_write_unlock(lock);
10f02d1c 405 preempt_enable_no_resched();
8a25d5de 406 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
1da177e4
LT
407}
408EXPORT_SYMBOL(_write_unlock_bh);
409
410int __lockfunc _spin_trylock_bh(spinlock_t *lock)
411{
412 local_bh_disable();
413 preempt_disable();
8a25d5de
IM
414 if (_raw_spin_trylock(lock)) {
415 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1da177e4 416 return 1;
8a25d5de 417 }
1da177e4 418
10f02d1c 419 preempt_enable_no_resched();
8a25d5de 420 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
1da177e4
LT
421 return 0;
422}
423EXPORT_SYMBOL(_spin_trylock_bh);
424
/*
 * Return non-zero when @addr lies inside the linker-collected
 * __lockfunc text section, i.e. when an instruction pointer is in one
 * of these lock-spinning functions.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	    && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);