/*
 * alpha: Fix fallout from locking changes
 * (history viewer residue — file: include/linux/spinlock_api_smp.h,
 *  from linux-block.git, commit fb1c8f93)
 */
1#ifndef __LINUX_SPINLOCK_API_SMP_H
2#define __LINUX_SPINLOCK_API_SMP_H
3
4#ifndef __LINUX_SPINLOCK_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * include/linux/spinlock_api_smp.h
10 *
11 * spinlock API declarations on SMP (and debug)
12 * (implemented in kernel/spinlock.c)
13 *
14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
15 * Released under the General Public License (GPL).
16 */
17
18int in_lock_functions(unsigned long addr);
19
c2f21ce2 20#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
fb1c8f93 21
c2f21ce2
TG
22void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
9f50b93f 24 __acquires(lock);
c2f21ce2
TG
25void __lockfunc
26_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
b7d39aff 27 __acquires(lock);
c2f21ce2
TG
28void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock);
6b6b4792 30
c2f21ce2 31unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
9f50b93f 32 __acquires(lock);
c2f21ce2
TG
33unsigned long __lockfunc
34_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
cfd3ef23 35 __acquires(lock);
c2f21ce2
TG
36int __lockfunc _spin_trylock(raw_spinlock_t *lock);
37int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
38void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock);
39void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
40void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
41void __lockfunc
42_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
9f50b93f 43 __releases(lock);
fb1c8f93 44
6beb0009 45#ifdef CONFIG_INLINE_SPIN_LOCK
892a7c67
HC
46#define _spin_lock(lock) __spin_lock(lock)
47#endif
48
6beb0009 49#ifdef CONFIG_INLINE_SPIN_LOCK_BH
892a7c67
HC
50#define _spin_lock_bh(lock) __spin_lock_bh(lock)
51#endif
52
6beb0009 53#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
892a7c67
HC
54#define _spin_lock_irq(lock) __spin_lock_irq(lock)
55#endif
56
6beb0009 57#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
892a7c67
HC
58#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
59#endif
60
6beb0009 61#ifdef CONFIG_INLINE_SPIN_TRYLOCK
892a7c67
HC
62#define _spin_trylock(lock) __spin_trylock(lock)
63#endif
64
6beb0009 65#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
892a7c67
HC
66#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
67#endif
68
6beb0009 69#ifdef CONFIG_INLINE_SPIN_UNLOCK
892a7c67
HC
70#define _spin_unlock(lock) __spin_unlock(lock)
71#endif
72
6beb0009 73#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
892a7c67
HC
74#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
75#endif
76
6beb0009 77#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
892a7c67
HC
78#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
79#endif
80
6beb0009 81#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
892a7c67
HC
82#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
83#endif
84
c2f21ce2 85static inline int __spin_trylock(raw_spinlock_t *lock)
69d0ee73
HC
86{
87 preempt_disable();
88 if (_raw_spin_trylock(lock)) {
89 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
90 return 1;
91 }
92 preempt_enable();
93 return 0;
94}
95
69d0ee73
HC
96/*
97 * If lockdep is enabled then we use the non-preemption spin-ops
98 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
99 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
100 */
101#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
102
c2f21ce2 103static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
69d0ee73
HC
104{
105 unsigned long flags;
106
107 local_irq_save(flags);
108 preempt_disable();
109 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
110 /*
111 * On lockdep we dont want the hand-coded irq-enable of
112 * _raw_spin_lock_flags() code, because lockdep assumes
113 * that interrupts are not re-enabled during lock-acquire:
114 */
115#ifdef CONFIG_LOCKDEP
116 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
117#else
118 _raw_spin_lock_flags(lock, &flags);
119#endif
120 return flags;
121}
122
c2f21ce2 123static inline void __spin_lock_irq(raw_spinlock_t *lock)
69d0ee73
HC
124{
125 local_irq_disable();
126 preempt_disable();
127 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
128 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
129}
130
c2f21ce2 131static inline void __spin_lock_bh(raw_spinlock_t *lock)
69d0ee73
HC
132{
133 local_bh_disable();
134 preempt_disable();
135 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
136 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
137}
138
c2f21ce2 139static inline void __spin_lock(raw_spinlock_t *lock)
69d0ee73
HC
140{
141 preempt_disable();
142 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
143 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
144}
145
69d0ee73
HC
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
147
c2f21ce2 148static inline void __spin_unlock(raw_spinlock_t *lock)
69d0ee73
HC
149{
150 spin_release(&lock->dep_map, 1, _RET_IP_);
151 _raw_spin_unlock(lock);
152 preempt_enable();
153}
154
c2f21ce2 155static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
69d0ee73
HC
156 unsigned long flags)
157{
158 spin_release(&lock->dep_map, 1, _RET_IP_);
159 _raw_spin_unlock(lock);
160 local_irq_restore(flags);
161 preempt_enable();
162}
163
c2f21ce2 164static inline void __spin_unlock_irq(raw_spinlock_t *lock)
69d0ee73
HC
165{
166 spin_release(&lock->dep_map, 1, _RET_IP_);
167 _raw_spin_unlock(lock);
168 local_irq_enable();
169 preempt_enable();
170}
171
c2f21ce2 172static inline void __spin_unlock_bh(raw_spinlock_t *lock)
69d0ee73
HC
173{
174 spin_release(&lock->dep_map, 1, _RET_IP_);
175 _raw_spin_unlock(lock);
176 preempt_enable_no_resched();
177 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
178}
179
c2f21ce2 180static inline int __spin_trylock_bh(raw_spinlock_t *lock)
69d0ee73
HC
181{
182 local_bh_disable();
183 preempt_disable();
184 if (_raw_spin_trylock(lock)) {
185 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
186 return 1;
187 }
188 preempt_enable_no_resched();
189 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
190 return 0;
191}
192
#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */