kernel/spinlock.c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004) Ingo Molnar
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 */
int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
{
	_raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic_raw_read_trylock);
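
/*
 * Note that this generic fallback takes the read lock unconditionally
 * and always reports success: it can spin waiting for a writer rather
 * than failing fast.  Architectures with a cheap non-blocking test are
 * expected to provide their own _raw_read_trylock() instead.
 */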

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
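
/*
 * Illustrative caller (not part of this file): the usual trylock
 * pattern with a fallback path when the lock is contended.
 * "stats_lock", update_stats() and defer_stats_update() are
 * hypothetical names used only for this sketch.
 *
 *	if (_spin_trylock(&stats_lock)) {
 *		update_stats();
 *		_spin_unlock(&stats_lock);
 *	} else
 *		defer_stats_update();
 *
 * On success the lock is returned with preemption disabled; the
 * matching _spin_unlock() re-enables it.
 */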

#ifndef CONFIG_PREEMPT

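/*
 * Without CONFIG_PREEMPT the lock-acquire paths below are thin
 * wrappers: disable preemption (and, for the _irq/_irqsave/_bh
 * variants, interrupts or softirqs first), then take the raw
 * architecture lock.
 */
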
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
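
/*
 * Illustrative use (not part of this file): the flags value returned
 * here must be passed back to _spin_unlock_irqrestore().  "dev->lock"
 * is a hypothetical lock used only for this sketch.
 *
 *	unsigned long flags;
 *
 *	flags = _spin_lock_irqsave(&dev->lock);
 *	... critical section, IRQs off on this CPU ...
 *	_spin_unlock_irqrestore(&dev->lock, flags);
 *
 * Normal kernel code goes through the spin_lock_irqsave() /
 * spin_unlock_irqrestore() wrappers in <linux/spinlock.h> rather than
 * calling these underscored helpers directly.
 */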

void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	preempt_disable();						\
	for (;;) {							\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	preempt_disable();						\
	for (;;) {							\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
									\
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
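
/*
 * For reference, BUILD_LOCK_OPS(spin, spinlock) expands _spin_lock()
 * to roughly the following (sketch derived from the macro above):
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		for (;;) {
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				cpu_relax();
 *			preempt_disable();
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */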

#endif /* CONFIG_PREEMPT */

void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
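
/*
 * Illustrative pairing (not part of this file): _spin_lock_bh() and
 * _spin_unlock_bh() bracket data that is also touched from softirq
 * context, keeping softirqs off on this CPU while the lock is held.
 * "queue_lock", "req" and "pending" are hypothetical names used only
 * for this sketch.
 *
 *	_spin_lock_bh(&queue_lock);
 *	list_add_tail(&req->list, &pending);
 *	_spin_unlock_bh(&queue_lock);
 */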

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);

int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
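
/*
 * Illustrative use (not part of this file): profiling code can test a
 * sampled program counter against the __lockfunc text so that time
 * spent spinning is credited to the caller rather than to the lock
 * primitives themselves.  "pc" and "caller_pc" are hypothetical names
 * used only for this sketch.
 *
 *	if (in_lock_functions(pc))
 *		pc = caller_pc;
 */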