Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* bitops.h: bit operations for the Fujitsu FR-V CPUs |
2 | * | |
3 | * For an explanation of how atomic ops work in this arch, see: | |
0868ff7a | 4 | * Documentation/frv/atomic-ops.txt |
1da177e4 LT |
5 | * |
6 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | |
7 | * Written by David Howells (dhowells@redhat.com) | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or | |
10 | * modify it under the terms of the GNU General Public License | |
11 | * as published by the Free Software Foundation; either version | |
12 | * 2 of the License, or (at your option) any later version. | |
13 | */ | |
14 | #ifndef _ASM_BITOPS_H | |
15 | #define _ASM_BITOPS_H | |
16 | ||
1da177e4 LT |
17 | #include <linux/compiler.h> |
18 | #include <asm/byteorder.h> | |
1da177e4 LT |
19 | |
20 | #ifdef __KERNEL__ | |
21 | ||
0624517d JS |
22 | #ifndef _LINUX_BITOPS_H |
23 | #error only <linux/bitops.h> can be included directly | |
24 | #endif | |
25 | ||
1f6d7a93 | 26 | #include <asm-generic/bitops/ffz.h> |
1da177e4 LT |
27 | |
28 | /* | |
29 | * clear_bit() doesn't provide any barrier for the compiler. | |
30 | */ | |
31 | #define smp_mb__before_clear_bit() barrier() | |
32 | #define smp_mb__after_clear_bit() barrier() | |
33 | ||
6784fd59 MD |
34 | #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS |
/*
 * Atomically clear the bits given in @mask in the word at @v, returning
 * the word's previous value.
 *
 * The loop relies on the LD.P/ORCR pairing being atomic and on CST.P
 * only storing while the condition set up by ORCR still holds; if the
 * store does not happen, ICC3.Z remains set and the sequence retries
 * (see Documentation/frv/atomic-ops.txt).
 */
static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	and%I3		%1,%3,%2		\n"	/* operand 3 is ~mask, so this is old & ~mask */
	    "	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq		icc3,#0,0b		\n"	/* retry while ICC3.Z is still set */
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(~mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}
57 | ||
/*
 * Atomically set the bits given in @mask in the word at @v, returning
 * the word's previous value.
 *
 * Same LD.P/ORCR + conditional-store retry loop as
 * atomic_test_and_ANDNOT_mask() (see Documentation/frv/atomic-ops.txt).
 */
static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	or%I3		%1,%3,%2		\n"	/* old | mask */
	    "	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq		icc3,#0,0b		\n"	/* retry while ICC3.Z is still set */
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}
80 | ||
/*
 * Atomically flip the bits given in @mask in the word at @v, returning
 * the word's previous value.
 *
 * Same LD.P/ORCR + conditional-store retry loop as
 * atomic_test_and_ANDNOT_mask() (see Documentation/frv/atomic-ops.txt).
 */
static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
	unsigned long old, tmp;

	asm volatile(
	    "0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	xor%I3		%1,%3,%2		\n"	/* old ^ mask */
	    "	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
	    "	beq		icc3,#0,0b		\n"	/* retry while ICC3.Z is still set */
	    : "+U"(*v), "=&r"(old), "=r"(tmp)
	    : "NPr"(mask)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return old;
}
103 | ||
104 | #else | |
105 | ||
106 | extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); | |
107 | extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); | |
108 | extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); | |
109 | ||
110 | #endif | |
111 | ||
112 | #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) | |
113 | #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) | |
114 | ||
/* Atomically clear bit @nr of the bitmap at @addr; return non-zero iff
 * the bit was previously set. */
static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	unsigned long bit = 1UL << (nr & 31);

	return (atomic_test_and_ANDNOT_mask(bit, word) & bit) != 0;
}
122 | ||
/* Atomically set bit @nr of the bitmap at @addr; return non-zero iff
 * the bit was previously set. */
static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	unsigned long bit = 1UL << (nr & 31);

	return (atomic_test_and_OR_mask(bit, word) & bit) != 0;
}
130 | ||
/* Atomically toggle bit @nr of the bitmap at @addr; return non-zero iff
 * the bit was previously set. */
static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	unsigned long bit = 1UL << (nr & 31);

	return (atomic_test_and_XOR_mask(bit, word) & bit) != 0;
}
138 | ||
/* Atomically clear bit @nr of the bitmap at @addr, ignoring the old value. */
static inline void clear_bit(unsigned long nr, volatile void *addr)
{
	(void) test_and_clear_bit(nr, addr);
}
143 | ||
/* Atomically set bit @nr of the bitmap at @addr, ignoring the old value. */
static inline void set_bit(unsigned long nr, volatile void *addr)
{
	(void) test_and_set_bit(nr, addr);
}
148 | ||
/* Atomically toggle bit @nr of the bitmap at @addr, ignoring the old value. */
static inline void change_bit(unsigned long nr, volatile void *addr)
{
	(void) test_and_change_bit(nr, addr);
}
153 | ||
/* Non-atomically clear bit @nr of the bitmap at @addr. */
static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);

	*word &= ~bit;
}
163 | ||
/* Non-atomically set bit @nr of the bitmap at @addr. */
static inline void __set_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);

	*word |= bit;
}
173 | ||
/* Non-atomically toggle bit @nr of the bitmap at @addr. */
static inline void __change_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);

	*word ^= bit;
}
183 | ||
/* Non-atomically clear bit @nr of the bitmap at @addr; return non-zero
 * iff the bit was previously set. */
static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);
	int was_set = (*word & bit) != 0;

	*word &= ~bit;
	return was_set;
}
195 | ||
/* Non-atomically set bit @nr of the bitmap at @addr; return non-zero
 * iff the bit was previously set. */
static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);
	int was_set = (*word & bit) != 0;

	*word |= bit;
	return was_set;
}
207 | ||
/* Non-atomically toggle bit @nr of the bitmap at @addr; return non-zero
 * iff the bit was previously set. */
static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
	volatile unsigned long *word = (volatile unsigned long *) addr + (nr >> 5);
	int bit = 1 << (nr & 31);
	int was_set = (*word & bit) != 0;

	*word ^= bit;
	return was_set;
}
219 | ||
/*
 * This routine doesn't need to be atomic.  It is the constant-@nr
 * flavour picked by the test_bit() macro so the compiler can fold the
 * word index and shift at compile time.
 */
static inline int
__constant_test_bit(unsigned long nr, const volatile void *addr)
{
	const volatile unsigned int *word =
		(const volatile unsigned int *) addr + (nr >> 5);

	return (*word >> (nr & 31)) & 1;
}
228 | ||
/*
 * Non-atomically test bit @nr of the bitmap at @addr; return non-zero
 * iff the bit is set.  Variable-@nr flavour picked by test_bit().
 *
 * Read through a const volatile unsigned int pointer rather than a
 * plain int pointer: this keeps the qualifiers of the parameter type
 * (casting away volatile and then dereferencing is undefined if the
 * object really is volatile), matches __constant_test_bit()'s access
 * type, and the unsigned mask avoids the implementation-defined
 * 1 << 31 on a signed int.
 */
static inline int __test_bit(unsigned long nr, const volatile void *addr)
{
	const volatile unsigned int *a = (const volatile unsigned int *) addr;
	unsigned int mask;

	a += nr >> 5;
	mask = 1U << (nr & 0x1f);
	return ((mask & *a) != 0);
}
238 | ||
/*
 * test_bit() is not atomic: compile-time-constant bit numbers are
 * dispatched to __constant_test_bit() so the word index and mask fold
 * away; variable bit numbers go to __test_bit().
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
243 | ||
1f6d7a93 | 244 | #include <asm-generic/bitops/find.h> |
1da177e4 | 245 | |
92fc7072 DH |
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs:
 * - return 32..1 to indicate bit 31..0 most significant bit set
 * - return 0 to indicate no bits set
 */
#define fls(x)						\
({							\
	int bit;					\
							\
	/* cc4 is set iff x is non-zero; the conditional csub	\
	 * instructions then either force the result to 0	\
	 * (x == 0) or turn the cscan result into 32..1 */	\
	asm("	subcc	%1,gr0,gr0,icc0		\n"	\
	    "	ckne	icc0,cc4		\n"	\
	    "	cscan.p	%1,gr0,%0	,cc4,#1	\n"	\
	    "	csub	%0,%0,%0	,cc4,#0	\n"	\
	    "	csub	%2,%0,%0	,cc4,#1	\n"	\
	    : "=&r"(bit)				\
	    : "r"(x), "r"(32)				\
	    : "icc0", "cc4"				\
	    );						\
							\
	bit;						\
})
270 | ||
a8ad27d0 DH |
271 | /** |
272 | * fls64 - find last bit set in a 64-bit value | |
273 | * @n: the value to search | |
274 | * | |
275 | * This is defined the same way as ffs: | |
276 | * - return 64..1 to indicate bit 63..0 most significant bit set | |
277 | * - return 0 to indicate no bits set | |
278 | */ | |
279 | static inline __attribute__((const)) | |
280 | int fls64(u64 n) | |
281 | { | |
282 | union { | |
283 | u64 ll; | |
284 | struct { u32 h, l; }; | |
285 | } _; | |
286 | int bit, x, y; | |
287 | ||
288 | _.ll = n; | |
289 | ||
290 | asm(" subcc.p %3,gr0,gr0,icc0 \n" | |
291 | " subcc %4,gr0,gr0,icc1 \n" | |
292 | " ckne icc0,cc4 \n" | |
293 | " ckne icc1,cc5 \n" | |
294 | " norcr cc4,cc5,cc6 \n" | |
295 | " csub.p %0,%0,%0 ,cc6,1 \n" | |
296 | " orcr cc5,cc4,cc4 \n" | |
297 | " andcr cc4,cc5,cc4 \n" | |
298 | " cscan.p %3,gr0,%0 ,cc4,0 \n" | |
299 | " setlos #64,%1 \n" | |
300 | " cscan.p %4,gr0,%0 ,cc4,1 \n" | |
301 | " setlos #32,%2 \n" | |
302 | " csub.p %1,%0,%0 ,cc4,0 \n" | |
303 | " csub %2,%0,%0 ,cc4,1 \n" | |
304 | : "=&r"(bit), "=r"(x), "=r"(y) | |
305 | : "0r"(_.h), "r"(_.l) | |
306 | : "icc0", "icc1", "cc4", "cc5", "cc6" | |
307 | ); | |
308 | return bit; | |
309 | ||
310 | } | |
311 | ||
cf134483 DH |
312 | /** |
313 | * ffs - find first bit set | |
314 | * @x: the word to search | |
315 | * | |
316 | * - return 32..1 to indicate bit 31..0 most least significant bit set | |
317 | * - return 0 to indicate no bits set | |
318 | */ | |
319 | static inline __attribute__((const)) | |
320 | int ffs(int x) | |
321 | { | |
322 | /* Note: (x & -x) gives us a mask that is the least significant | |
323 | * (rightmost) 1-bit of the value in x. | |
324 | */ | |
325 | return fls(x & -x); | |
326 | } | |
327 | ||
328 | /** | |
329 | * __ffs - find first bit set | |
330 | * @x: the word to search | |
331 | * | |
332 | * - return 31..0 to indicate bit 31..0 most least significant bit set | |
333 | * - if no bits are set in x, the result is undefined | |
334 | */ | |
335 | static inline __attribute__((const)) | |
336 | int __ffs(unsigned long x) | |
337 | { | |
338 | int bit; | |
339 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x)); | |
340 | return 31 - bit; | |
341 | } | |
342 | ||
ee38e514 RR |
343 | /** |
344 | * __fls - find last (most-significant) set bit in a long word | |
345 | * @word: the word to search | |
346 | * | |
347 | * Undefined if no set bit exists, so code should check against 0 first. | |
348 | */ | |
349 | static inline unsigned long __fls(unsigned long word) | |
350 | { | |
351 | unsigned long bit; | |
352 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word)); | |
353 | return bit; | |
354 | } | |
355 | ||
f0d1b0b3 DH |
356 | /* |
357 | * special slimline version of fls() for calculating ilog2_u32() | |
358 | * - note: no protection against n == 0 | |
359 | */ | |
360 | #define ARCH_HAS_ILOG2_U32 | |
361 | static inline __attribute__((const)) | |
362 | int __ilog2_u32(u32 n) | |
363 | { | |
364 | int bit; | |
365 | asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n)); | |
366 | return 31 - bit; | |
367 | } | |
368 | ||
/*
 * special slimline version of fls64() for calculating ilog2_u64()
 * - note: no protection against n == 0
 */
#define ARCH_HAS_ILOG2_U64
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	/* split the 64-bit value into 32-bit halves so each half can be
	 * scanned with 32-bit conditional instructions */
	union {
		u64 ll;
		struct { u32 h, l; };
	} _;
	int bit, x, y;

	_.ll = n;

	asm("	subcc		%3,gr0,gr0,icc0		\n"	/* cc4 <- high half is zero */
	    "	ckeq		icc0,cc4		\n"
	    "	cscan.p		%3,gr0,%0	,cc4,0	\n"	/* scan the half cc4 selects... */
	    "	setlos		#63,%1			\n"
	    "	cscan.p		%4,gr0,%0	,cc4,1	\n"
	    "	setlos		#31,%2			\n"
	    "	csub.p		%1,%0,%0	,cc4,0	\n"	/* ...biased by 63 (high half)... */
	    "	csub		%2,%0,%0	,cc4,1	\n"	/* ...or by 31 (low half) */
	    : "=&r"(bit), "=r"(x), "=r"(y)
	    : "0r"(_.h), "r"(_.l)
	    : "icc0", "cc4"
	    );
	return bit;
}
399 | ||
1f6d7a93 AM |
400 | #include <asm-generic/bitops/sched.h> |
401 | #include <asm-generic/bitops/hweight.h> | |
26333576 | 402 | #include <asm-generic/bitops/lock.h> |
1da177e4 | 403 | |
861b5ae7 | 404 | #include <asm-generic/bitops/le.h> |
1da177e4 | 405 | |
148817ba | 406 | #include <asm-generic/bitops/ext2-atomic-setbit.h> |
1da177e4 | 407 | |
1da177e4 LT |
408 | #endif /* __KERNEL__ */ |
409 | ||
410 | #endif /* _ASM_BITOPS_H */ |