#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic.  All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "or %0, %2;             \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a |= mask;
}

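/*
 * Usage sketch (not part of the original header): the bitmap and bit
 * indices below are hypothetical.  set_bit() may be called concurrently;
 * __set_bit() requires the caller to provide its own serialization.
 *
 *      static unsigned long pending[2];        // 64-bit bitmap
 *
 *      set_bit(37, pending);   // atomic: word 1, bit 5
 *      __set_bit(3, pending);  // non-atomic: word 0, bit 3
 */
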
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "and %0, %2;            \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (~mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask;
        volatile unsigned long *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a &= ~mask;
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

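/*
 * Usage sketch (not part of the original header): releasing a bit used
 * as a lock.  The barrier makes the critical section's stores visible
 * before the lock bit is seen clear; LOCK_BIT and lock_word are
 * hypothetical names.
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(LOCK_BIT, &lock_word);        // release the "lock"
 */
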
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a ^= mask;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "xor %0, %2;            \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

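/*
 * Usage sketch (not part of the original header): atomically toggling a
 * hypothetical status bit from concurrent contexts.
 *
 *      change_bit(LED_BIT, &led_state);        // atomic XOR of one bit
 */
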
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %3;            \n\t"
                "or %1, %3;             \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}

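/*
 * Usage sketch (not part of the original header): a test_and_set-based
 * trylock, the classic use of this primitive.  BUSY_BIT and state are
 * hypothetical.
 *
 *      if (test_and_set_bit(BUSY_BIT, &state))
 *              return -EBUSY;          // bit was already set; owner exists
 *      // ... critical section ...
 *      smp_mb__before_clear_bit();
 *      clear_bit(BUSY_BIT, &state);    // release
 */
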
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);

        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%3")
                M32R_LOCK" %0, @%3;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %2;            \n\t"
                "not %2, %2;            \n\t"
                "and %1, %2;            \n\t"
                M32R_UNLOCK" %1, @%3;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
                : "r" (a)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a ^= mask;

        return (oldbit != 0);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %3;            \n\t"
                "xor %1, %3;            \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}

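/*
 * Usage sketch (not part of the original header): all test_and_* ops
 * return the bit's previous value, so the caller learns what changed.
 *
 *      unsigned long word = 0x1;       // hypothetical
 *
 *      test_and_change_bit(0, &word);  // returns 1, word becomes 0x0
 *      test_and_change_bit(0, &word);  // returns 0, word becomes 0x1
 */
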
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        return ((*a & mask) != 0);
}

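/*
 * Usage sketch (not part of the original header): test_bit() is a plain
 * read; it implies no barrier and may race with concurrent updates.
 *
 *      if (test_bit(37, pending))      // reads word 1, bit 5
 *              handle_pending();       // hypothetical handler
 */
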
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        int k;

        word = ~word;
        k = 0;
        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}

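/*
 * Worked example (not part of the original header): ffz(0x00000007).
 * After word = ~word, word = 0xfffffff8, and the binary search walks:
 *
 *      low 16 bits nonzero, low 8 nonzero, low 4 nonzero (0x8)
 *      low 2 bits zero  -> k = 2, word >>= 2
 *      low bit zero     -> k = 3
 *
 * Result: 3, the index of the first zero bit in 0x00000007.
 */
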
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr,
                                         int size, int offset)
{
        const unsigned long *p = addr + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

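/*
 * Usage sketch (not part of the original header): finding a free slot
 * in an allocation bitmap.  NSLOTS and slot_map are hypothetical.
 *
 *      int slot = find_first_zero_bit(slot_map, NSLOTS);
 *      if (slot < NSLOTS)
 *              __set_bit(slot, slot_map);      // claim it (caller locks)
 *      // else: bitmap is full
 */
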
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
        int k = 0;

        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}

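/*
 * Worked example (not part of the original header): the 140-bit map is
 * five 32-bit words.  If only priority 100 is runnable, b[0..2] == 0,
 * b[3] == 0x10 (bit 4), and the function returns 96 + __ffs(0x10) =
 * 96 + 4 = 100.
 */
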
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

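/*
 * Usage sketch (not part of the original header): iterating over every
 * set bit, the pattern later formalized as for_each_set_bit().
 * NBITS, map, and process() are hypothetical.
 *
 *      for (bit = find_first_bit(map, NBITS);
 *           bit < NBITS;
 *           bit = find_next_bit(map, NBITS, bit + 1))
 *              process(bit);           // per-bit handler
 */
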
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

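/*
 * Worked comparison (not part of the original header) for x = 0x8:
 *
 *      ffs(0x8)   == 4         // 1-based, like libc ffs(); ffs(0) == 0
 *      __ffs(0x8) == 3         // 0-based; undefined for x == 0
 *      ffz(0x8)   == 0         // index of the first *zero* bit
 */
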
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

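/*
 * Worked example (not part of the original header):
 * hweight8(0xA5) == 4, since 0xA5 is 10100101 in binary.
 */
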
#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2_XXXX functions
 * orig: include/asm-sh/bitops.h
 */

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit                    test_and_set_bit
#define ext2_clear_bit                  __test_and_clear_bit
#define ext2_test_bit                   test_bit
#define ext2_find_first_zero_bit        find_first_zero_bit
#define ext2_find_next_zero_bit         find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}

static inline int ext2_clear_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}

static inline int ext2_test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));

        return ((mask & *a) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
#endif

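/*
 * Note (not part of the original header): ext2 bitmaps use little-endian
 * bit numbering, i.e. bit 0 is the LSB of byte 0.  On big-endian M32R,
 * ext2_set_bit(0, addr) therefore sets the LSB of the first *byte*,
 * while set_bit(0, addr) sets the LSB of the first 32-bit *word*, which
 * lives in byte 3 of memory.  The byte-wise helpers above paper over
 * that difference.
 */
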
#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })

#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)         __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */