#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
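
/*
 * Usage sketch (illustration only, not part of the original header): an
 * atomic_t is declared and seeded with ATOMIC_INIT(), then accessed only
 * through the helpers above.  The variable name is made up.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	if (atomic_read(&example_count) == 5)
 *		do_something();
 */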

#include <linux/compiler.h>
#include <asm/system.h>

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_add	\n"
"	add	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
#endif
}
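
/*
 * Sketch (illustration only, not part of the original file): on SH-4A the
 * movli.l/movco.l pair behaves like load-linked/store-conditional, so the
 * assembly above is the retry loop that pseudo-C would spell roughly as:
 *
 *	do {
 *		tmp = __ll(&v->counter);		movli.l: load and set reservation
 *		tmp += i;				adjust the private copy
 *	} while (!__sc(&v->counter, tmp));		movco.l: store only if still reserved (sets T)
 *
 * __ll() and __sc() are hypothetical helpers named only for this sketch;
 * the non-SH4A fallback gets the same effect by disabling interrupts
 * around a plain read-modify-write.
 */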

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_sub	\n"
"	sub	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
#endif
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_add_return	\n"
"	add	%2, %0					\n"
"	movco.l	%0, @%3					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}
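
/*
 * Usage sketch (illustration only, not part of the original header):
 * because atomic_add_return() hands back the value after the update, a
 * caller can bump a counter and test the new total in one atomic step.
 * The function name, the "usage" counter and "limit" are made up.
 */
static inline int example_charge(atomic_t *usage, int amount, int limit)
{
	/* Add first; back the charge out again if it went over the limit. */
	if (atomic_add_return(amount, usage) > limit) {
		atomic_sub(amount, usage);
		return -1;
	}
	return 0;
}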

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_sub_return	\n"
"	sub	%2, %0					\n"
"	movco.l	%0, @%3					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
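
/*
 * Usage sketch (illustration only, not part of the original header): the
 * classic reference-count pattern built on the helpers above.  The struct,
 * the function names and example_release() are made up; only the atomic_*
 * calls come from this file.
 */
struct example_obj {
	atomic_t refcnt;
	/* ... payload ... */
};

void example_release(struct example_obj *obj);	/* hypothetical destructor */

static inline void example_get(struct example_obj *obj)
{
	atomic_inc(&obj->refcnt);
}

static inline void example_put(struct example_obj *obj)
{
	/* Exactly one caller sees the count reach zero and frees the object. */
	if (atomic_dec_and_test(&obj->refcnt))
		example_release(obj);
}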

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
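
/*
 * Usage sketch (illustration only, not part of the original header): the
 * usual compare-and-swap retry loop built on atomic_cmpxchg(), here used
 * to keep a running maximum that never moves backwards.  The function
 * name is made up for the example.
 */
static inline void example_track_max(atomic_t *max, int new)
{
	int old = atomic_read(max);

	while (new > old) {
		int seen = atomic_cmpxchg(max, old, new);
		if (seen == old)
			break;		/* our update won */
		old = seen;		/* someone else changed it; retry */
	}
}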

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
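
/*
 * Usage sketch (illustration only, not part of the original header):
 * atomic_inc_not_zero() is the standard way to take a reference only if
 * the count has not already dropped to zero, i.e. the object is not being
 * torn down.  struct example_obj reuses the made-up type from the earlier
 * sketch; the function name is likewise invented.
 */
static inline struct example_obj *example_get_live(struct example_obj *obj)
{
	/* Succeeds only while at least one other reference is still held. */
	if (obj && atomic_inc_not_zero(&obj->refcnt))
		return obj;
	return NULL;
}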

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
"	and	%2, %0					\n"
"	movco.l	%0, @%3					\n"
"	bf	1b					\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_set_mask	\n"
"	or	%2, %0					\n"
"	movco.l	%0, @%3					\n"
"	bf	1b					\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
#endif
}
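
/*
 * Usage sketch (illustration only, not part of the original header):
 * atomic_set_mask()/atomic_clear_mask() flip groups of flag bits in an
 * atomic_t without a read-modify-write race.  The flag values and names
 * below are made up for the example.
 */
#define EXAMPLE_FLAG_BUSY	0x01	/* hypothetical flag bits */
#define EXAMPLE_FLAG_ERROR	0x02

static atomic_t example_flags = ATOMIC_INIT(0);

static inline void example_mark_busy(void)
{
	atomic_set_mask(EXAMPLE_FLAG_BUSY, &example_flags);
}

static inline void example_mark_idle(void)
{
	atomic_clear_mask(EXAMPLE_FLAG_BUSY | EXAMPLE_FLAG_ERROR, &example_flags);
}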

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
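
/*
 * Usage sketch (illustration only, not part of the original header): the
 * smp_mb__*_atomic_* hooks mark call sites that need memory ordering
 * around a plain atomic_inc()/atomic_dec().  On SH they reduce to a
 * compiler barrier() because the operations above already serialize.
 * A typical caller looks roughly like:
 *
 *	publish_state(obj);			store the state first
 *	smp_mb__before_atomic_dec();		order the store before the dec
 *	atomic_dec(&obj->pending);
 *
 * publish_state() and the pending counter are made-up names for this
 * illustration.
 */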

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */