ext4: Add ext4_find_next_bit()
[linux-2.6-block.git] / include/asm-m68knommu/bitops.h

#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
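
/*
 * Illustrative note, not in the original header: ffz(x) from the
 * generic header returns the index of the lowest zero bit in x,
 * e.g. ffz(0x00000007) == 3.  ext2_find_next_zero_bit() below
 * relies on this to locate a free bit within a word.
 */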

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}
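
/*
 * Illustrative sketch, not part of the original header (demo_map is a
 * hypothetical bitmap): bit numbers index 32-bit words, so bit 37
 * lands in the second word.
 *
 *	static unsigned long demo_map[2];
 *
 *	set_bit(5, demo_map);	// sets bit 5 of demo_map[0]
 *	set_bit(37, demo_map);	// sets bit 5 of demo_map[1]
 */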

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
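
/*
 * Illustrative sketch of the assumed usage pattern (MY_BIT and wq are
 * hypothetical names): since clear_bit() itself is not a barrier,
 * callers that publish the cleared bit to another context insert one
 * explicitly.
 *
 *	clear_bit(MY_BIT, &flags);
 *	smp_mb__after_clear_bit();	// order the clear before the wakeup
 *	wake_up(&wq);
 */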

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
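
/*
 * Illustrative sketch, not from the original source (LOCK_BIT and
 * busy_flags are hypothetical names): the returned old value makes
 * this a natural try-lock style primitive.
 *
 *	if (test_and_set_bit(LOCK_BIT, &busy_flags))
 *		return -EBUSY;			// bit was already set
 *	... do the protected work ...
 *	clear_bit(LOCK_BIT, &busy_flags);
 */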

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bchg %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	const volatile int *a = (const volatile int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

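/*
 * Illustrative note, not in the original file: the macro picks the
 * constant-foldable variant when the bit number is known at compile
 * time and the plain load/mask variant otherwise.
 *
 *	test_bit(3, map);	// constant nr: __constant_test_bit()
 *	test_bit(i, map);	// variable nr: __test_bit()
 */
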
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

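/*
 * Illustrative sketch, not from the original source (group_lock, bit
 * and bitmap are hypothetical names): callers such as a filesystem
 * block allocator serialise the non-atomic byte operation with a
 * spinlock of their own.
 *
 *	if (ext2_set_bit_atomic(&group_lock, bit, bitmap))
 *		... bit was already set: block already allocated ...
 */
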
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little-endian value in tmp, but then the
		 * shift would be on the wrong bits.  We could keep a
		 * big-endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but that would decrease performance, so we swab the
		 * shifted mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little-endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

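/*
 * Illustrative sketch, not from the original source (bitmap and nbits
 * are hypothetical names): scanning a little-endian on-disk bitmap
 * for the first free slot.
 *
 *	unsigned long bit = ext2_find_next_zero_bit(bitmap, nbits, 0);
 *	if (bit >= nbits)
 *		... no free bit in this bitmap ...
 */
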
#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */