#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

/* Nonzero if @addr lies within the lock-function text (for backtraces). */
int in_lock_functions(unsigned long addr);

/* Debug aid: BUG if @x is not currently held. */
#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

/*
 * Out-of-line acquire/release entry points, implemented in
 * kernel/spinlock.c (see file header above).  The __acquires()/
 * __releases() annotations are consumed by sparse for lock-context
 * checking.
 */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
/*
 * When a CONFIG_INLINE_SPIN_* option is set, the corresponding
 * out-of-line _raw_spin_*() entry point declared above is redirected at
 * compile time to the __raw_spin_*() inline defined later in this file,
 * avoiding a function call on that path.  Note the unlock case is
 * inverted: _raw_spin_unlock() is inlined unless
 * CONFIG_UNINLINE_SPIN_UNLOCK requests the out-of-line version.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
/*
 * Try to acquire @lock without spinning.  Preemption is disabled up
 * front so that a successful acquisition leaves the task in the usual
 * "locked, non-preemptible" state; on failure preemption is re-enabled
 * and 0 is returned, leaving the caller's context unchanged.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		/* trylock argument == 1: non-blocking acquire, for lockdep */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

/*
 * Acquire @lock with local interrupts disabled, returning the previous
 * interrupt state for a later __raw_spin_unlock_irqrestore().
 * Ordering matters: irqs off first, then preemption off, then the
 * lockdep acquire annotation, then the actual spin.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/* spin via the trylock/lock pair so contention can be accounted */
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}

/*
 * Acquire @lock and unconditionally disable local interrupts (pairs
 * with __raw_spin_unlock_irq(); use the irqsave variant if the prior
 * interrupt state must be preserved).
 */
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

/*
 * Acquire @lock with softirq (bottom-half) processing disabled.
 * NOTE(review): there is no separate preempt_disable() here — the
 * SOFTIRQ_LOCK_OFFSET bump from __local_bh_disable_ip() is presumably
 * what keeps the section non-preemptible; confirm against that helper.
 */
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

/*
 * Plain acquire of @lock: disable preemption, annotate for lockdep,
 * then spin until the lock is taken.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Release @lock and re-enable preemption — the exact inverse of
 * __raw_spin_lock(): lockdep release annotation, hardware unlock,
 * then preemption back on.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

/*
 * Release @lock and restore the interrupt state previously saved by
 * __raw_spin_lock_irqsave().  Release order is the inverse of the
 * acquire: unlock first, then irq state back, then preemption.
 */
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

/*
 * Release @lock and unconditionally re-enable local interrupts
 * (pairs with __raw_spin_lock_irq()).
 */
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

/*
 * Release @lock and re-enable softirq processing (pairs with
 * __raw_spin_lock_bh(); the SOFTIRQ_LOCK_OFFSET must match the
 * disable side).
 */
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	do_raw_spin_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

/*
 * Try to acquire @lock with softirqs disabled.  On failure the
 * bottom-half disable is undone before returning 0, so the caller's
 * context is left unchanged either way except for lock ownership.
 */
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		/* trylock argument == 1: non-blocking acquire, for lockdep */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}

/* PREEMPT_RT has its own rwlock implementation */
#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
#endif

#endif /* __LINUX_SPINLOCK_API_SMP_H */