#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

/*
 * Generic ffs().
 */
static inline int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
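
/*
 * ffs() returns the 1-based index of the least significant set bit,
 * or 0 when no bit is set. Illustrative values for the binary
 * search above:
 *
 *	ffs(0)          == 0
 *	ffs(1)          == 1
 *	ffs(0x10)       == 5
 *	ffs(0x80000000) == 32
 */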

/*
 * Generic __ffs().
 */
static inline int __ffs(int x)
{
	int r = 0;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
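
/*
 * __ffs() returns the 0-based index of the least significant set
 * bit, so __ffs(1) == 0, __ffs(0x10) == 4, __ffs(0x80000000) == 31.
 * Note that this version also returns 0 for a zero argument, which
 * is indistinguishable from __ffs(1); callers must check for a
 * non-zero word first.
 */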

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
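
/*
 * For example, with b[0] == b[1] == b[2] == 0 and b[3] == 0x10,
 * sched_find_first_bit() returns __ffs(0x10) + 96 == 100.
 */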

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
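
/*
 * Illustrative values: ffz(0) == 0, ffz(1) == 1, ffz(0xff) == 8.
 */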


static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
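
/*
 * The (nr ^ 31) >> 3 addressing above picks the byte of the 32-bit
 * big-endian long that actually holds bit nr: bit 0 (the LSB of the
 * long) lives in byte 3, bit 31 in byte 0. The bset/bclr/bchg
 * instructions then operate on bit (nr & 7) within that byte.
 * Illustrative usage:
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	set_bit(0, map);	map[0] is now 0x00000001
 *	set_bit(33, map);	map[1] is now 0x00000002
 */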

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
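
/*
 * test_and_set_bit() returns non-zero if the bit was already set,
 * which makes it the usual building block for one-time setup.
 * Illustrative usage (init_done and do_one_time_setup() are
 * hypothetical, not defined by this header):
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_setup();	first caller only
 */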

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
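
/*
 * __builtin_constant_p(nr) is resolved at compile time, so
 * test_bit(5, map) expands to __constant_test_bit(5, map), while
 * test_bit(i, map) with a variable i calls __test_bit(i, map).
 * Both use the same long-word bit numbering as set_bit() above.
 */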

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

static __inline__ int find_next_zero_bit(const void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL >> size;
found_middle:
	return result + ffz(tmp);
}
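
/*
 * For example, with map[0] == 0xffffffff and map[1] == 0xfffffffe,
 * find_first_zero_bit(map, 64) skips the all-ones first word and
 * returns 32 + ffz(0xfffffffe) == 32.
 */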

/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
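
/*
 * For example, with map[0] == 0 and map[1] == 0x100,
 * find_first_bit(map, 64) returns 32 + __ffs(0x100) == 40.
 * If no bit is set in the range, size itself is returned.
 */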

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
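
/*
 * Illustrative values: hweight8(0xa5) == 4, hweight16(0xff00) == 8,
 * hweight32(0x80000001) == 2.
 */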


static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}
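
/*
 * The ext2_*() variants address byte nr >> 3 directly, without the
 * ^31 swizzle, giving the little-endian bit numbering that ext2
 * uses on disk: bit 0 is the LSB of byte 0. On this big-endian CPU
 * that differs from set_bit(); for example, ext2_set_bit(0, map)
 * sets bit 24 of the first 32-bit word, so map[0] becomes
 * 0x01000000.
 */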

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
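
/*
 * The _atomic variants simply take the caller's spinlock around the
 * test-and-modify. A sketch of typical usage (bitmap_lock is a
 * hypothetical lock, not defined by this header):
 *
 *	static spinlock_t bitmap_lock = SPIN_LOCK_UNLOCKED;
 *
 *	int was_set = ext2_set_bit_atomic(&bitmap_lock, bit, bitmap);
 */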

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, so the plain
		 * shift would mask the wrong bits. We could keep a big
		 * endian value in tmp instead, like this:
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 *
		 * but that would cost an extra swab, decreasing
		 * performance, so we swab the shifted mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
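
/*
 * For example, with the first byte of the bitmap all ones and the
 * rest zero, ext2_find_first_zero_bit(map, 64) reads the big-endian
 * word 0xff000000, swabs it to 0x000000ff, and returns ffz() == 8.
 */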

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
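
/*
 * generic_fls() returns the 1-based index of the most significant
 * set bit, or 0 for a zero argument: fls(0) == 0, fls(1) == 1,
 * fls(0x80000000) == 32.
 */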

#endif /* _M68KNOMMU_BITOPS_H */