Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ASM_SPINLOCK_H |
2 | #define __ASM_SPINLOCK_H | |
3 | ||
1cab4201 REB |
4 | #include <asm/barrier.h> |
5 | #include <asm/ldcw.h> | |
fb1c8f93 IM |
6 | #include <asm/processor.h> |
7 | #include <asm/spinlock_types.h> | |
1da177e4 | 8 | |
0199c4e6 | 9 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
1da177e4 LT |
10 | { |
11 | volatile unsigned int *a = __ldcw_align(x); | |
12 | return *a == 0; | |
13 | } | |
14 | ||
/* Plain lock: flags == 0 means the contention loop in
 * arch_spin_lock_flags() never re-enables interrupts while spinning. */
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
/*
 * Busy-wait until the lock is released.  The aligned lock word is zero
 * while the lock is held; smp_cond_load_acquire() spins until it reads a
 * non-zero value (VAL is the loaded value inside that macro) and returns
 * with acquire ordering, so the caller observes the prior critical
 * section's stores.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	smp_cond_load_acquire(a, VAL);
}
1da177e4 | 23 | |
/*
 * Acquire the lock.  __ldcw() atomically loads the aligned lock word and
 * clears it; a non-zero return means the word was 1 (free) and we now own
 * the lock.  While someone else holds it (*a == 0) we spin on a plain
 * load rather than re-issuing the atomic op, and if the caller's saved
 * PSW had interrupts enabled (PSW_SM_I) we briefly re-enable them each
 * iteration so interrupt delivery is not starved during contention.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();	/* order the critical section after the acquire */
}
41 | ||
0199c4e6 | 42 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
1da177e4 LT |
43 | { |
44 | volatile unsigned int *a; | |
45 | mb(); | |
46 | a = __ldcw_align(x); | |
47 | *a = 1; | |
48 | mb(); | |
49 | } | |
50 | ||
0199c4e6 | 51 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
1da177e4 LT |
52 | { |
53 | volatile unsigned int *a; | |
54 | int ret; | |
55 | ||
56 | mb(); | |
57 | a = __ldcw_align(x); | |
58 | ret = __ldcw(a) != 0; | |
59 | mb(); | |
60 | ||
61 | return ret; | |
62 | } | |
1da177e4 LT |
63 | |
64 | /* | |
6e071852 | 65 | * Read-write spinlocks, allowing multiple readers but only one writer. |
65ee8f0a MW |
66 | * Linux rwlocks are unfair to writers; they can be starved for an indefinite |
67 | * time by readers. With care, they can also be taken in interrupt context. | |
68 | * | |
69 | * In the PA-RISC implementation, we have a spinlock and a counter. | |
70 | * Readers use the lock to serialise their access to the counter (which | |
71 | * records how many readers currently hold the lock). | |
72 | * Writers hold the spinlock, preventing any readers or other writers from | |
73 | * grabbing the rwlock. | |
1da177e4 | 74 | */ |
1da177e4 | 75 | |
65ee8f0a MW |
76 | /* Note that we have to ensure interrupts are disabled in case we're |
77 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 78 | static __inline__ void arch_read_lock(arch_rwlock_t *rw) |
1da177e4 | 79 | { |
6e071852 MW |
80 | unsigned long flags; |
81 | local_irq_save(flags); | |
0199c4e6 | 82 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 83 | rw->counter++; |
0199c4e6 | 84 | arch_spin_unlock(&rw->lock); |
6e071852 | 85 | local_irq_restore(flags); |
1da177e4 | 86 | } |
1da177e4 | 87 | |
65ee8f0a MW |
88 | /* Note that we have to ensure interrupts are disabled in case we're |
89 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 90 | static __inline__ void arch_read_unlock(arch_rwlock_t *rw) |
1da177e4 | 91 | { |
6e071852 MW |
92 | unsigned long flags; |
93 | local_irq_save(flags); | |
0199c4e6 | 94 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 95 | rw->counter--; |
0199c4e6 | 96 | arch_spin_unlock(&rw->lock); |
6e071852 | 97 | local_irq_restore(flags); |
1da177e4 LT |
98 | } |
99 | ||
65ee8f0a MW |
100 | /* Note that we have to ensure interrupts are disabled in case we're |
101 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 102 | static __inline__ int arch_read_trylock(arch_rwlock_t *rw) |
6e071852 MW |
103 | { |
104 | unsigned long flags; | |
105 | retry: | |
106 | local_irq_save(flags); | |
0199c4e6 | 107 | if (arch_spin_trylock(&rw->lock)) { |
6e071852 | 108 | rw->counter++; |
0199c4e6 | 109 | arch_spin_unlock(&rw->lock); |
6e071852 MW |
110 | local_irq_restore(flags); |
111 | return 1; | |
112 | } | |
113 | ||
114 | local_irq_restore(flags); | |
115 | /* If write-locked, we fail to acquire the lock */ | |
116 | if (rw->counter < 0) | |
117 | return 0; | |
118 | ||
119 | /* Wait until we have a realistic chance at the lock */ | |
0199c4e6 | 120 | while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) |
6e071852 MW |
121 | cpu_relax(); |
122 | ||
123 | goto retry; | |
124 | } | |
1da177e4 | 125 | |
65ee8f0a MW |
126 | /* Note that we have to ensure interrupts are disabled in case we're |
127 | * interrupted by some other code that wants to read_trylock() this lock */ | |
e5931943 | 128 | static __inline__ void arch_write_lock(arch_rwlock_t *rw) |
1da177e4 | 129 | { |
6e071852 | 130 | unsigned long flags; |
1da177e4 | 131 | retry: |
6e071852 | 132 | local_irq_save(flags); |
0199c4e6 | 133 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 134 | |
6e071852 | 135 | if (rw->counter != 0) { |
0199c4e6 | 136 | arch_spin_unlock(&rw->lock); |
6e071852 | 137 | local_irq_restore(flags); |
1da177e4 | 138 | |
fb1c8f93 IM |
139 | while (rw->counter != 0) |
140 | cpu_relax(); | |
1da177e4 LT |
141 | |
142 | goto retry; | |
143 | } | |
144 | ||
6e071852 MW |
145 | rw->counter = -1; /* mark as write-locked */ |
146 | mb(); | |
147 | local_irq_restore(flags); | |
1da177e4 | 148 | } |
1da177e4 | 149 | |
/* Drop the write lock: clear the writer marker (-1 -> 0), then release
 * the internal spinlock (arch_spin_unlock supplies the barriers). */
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}
155 | ||
65ee8f0a MW |
156 | /* Note that we have to ensure interrupts are disabled in case we're |
157 | * interrupted by some other code that wants to read_trylock() this lock */ | |
e5931943 | 158 | static __inline__ int arch_write_trylock(arch_rwlock_t *rw) |
1da177e4 | 159 | { |
6e071852 MW |
160 | unsigned long flags; |
161 | int result = 0; | |
162 | ||
163 | local_irq_save(flags); | |
0199c4e6 | 164 | if (arch_spin_trylock(&rw->lock)) { |
6e071852 MW |
165 | if (rw->counter == 0) { |
166 | rw->counter = -1; | |
167 | result = 1; | |
168 | } else { | |
169 | /* Read-locked. Oh well. */ | |
0199c4e6 | 170 | arch_spin_unlock(&rw->lock); |
6e071852 | 171 | } |
1da177e4 | 172 | } |
6e071852 | 173 | local_irq_restore(flags); |
1da177e4 | 174 | |
6e071852 | 175 | return result; |
1da177e4 | 176 | } |
1da177e4 | 177 | |
bc8846c5 KM |
178 | /* |
179 | * read_can_lock - would read_trylock() succeed? | |
180 | * @lock: the rwlock in question. | |
181 | */ | |
e5931943 | 182 | static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) |
1da177e4 | 183 | { |
bc8846c5 | 184 | return rw->counter >= 0; |
1da177e4 LT |
185 | } |
186 | ||
bc8846c5 KM |
187 | /* |
188 | * write_can_lock - would write_trylock() succeed? | |
189 | * @lock: the rwlock in question. | |
190 | */ | |
e5931943 | 191 | static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) |
1da177e4 | 192 | { |
bc8846c5 | 193 | return !rw->counter; |
1da177e4 LT |
194 | } |
195 | ||
/* The rwlock _flags variants ignore the saved PSW flags; fall through
 * to the plain operations, which manage interrupt state themselves. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
f5f7eac4 | 198 | |
1da177e4 | 199 | #endif /* __ASM_SPINLOCK_H */ |