#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
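
/*
 * Illustrative example (not part of the original header): @nr indexes
 * bits across the whole array at @addr, so bit 32 lands in the second
 * long.  The "flags" array below is made up for this example.
 *
 *	static volatile unsigned long flags[2];
 *
 *	set_bit(0, flags);	 bit 0 of flags[0] is now 1
 *	set_bit(32, flags);	 bit 0 of flags[1] is now 1
 */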

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
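
/*
 * Illustrative example (not part of the original header): releasing a
 * lock-like flag with clear_bit().  The barrier orders the critical
 * section's stores before the bit is seen clear; MY_LOCK_BIT, my_flags
 * and shared_data are made up for this example.
 *
 *	shared_data = new_value;
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &my_flags);
 */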

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures. It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
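
/*
 * Illustrative example (not part of the original header): a minimal
 * try-lock sketch built on test_and_set_bit(); MY_BUSY_BIT and
 * my_flags are made up for this example.
 *
 *	if (!test_and_set_bit(MY_BUSY_BIT, &my_flags)) {
 *		... old value was 0, we now own the resource ...
 *		smp_mb__before_clear_bit();
 *		clear_bit(MY_BUSY_BIT, &my_flags);
 *	} else {
 *		... the bit was already set, somebody else owns it ...
 *	}
 */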

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
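
/*
 * Illustrative note (not part of the original header): test_bit()
 * picks an implementation based on whether @nr is a compile-time
 * constant.  A constant @nr is evaluated in plain C (constant_test_bit),
 * a variable @nr uses the btl instruction (variable_test_bit).
 *
 *	if (test_bit(5, &my_flags))		 constant form
 *		...
 *	if (test_bit(bit_nr, &my_flags))	 variable form
 *		...
 */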

#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
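
/*
 * Illustrative example (not part of the original header): finding a
 * free slot in a small allocation bitmap.  The names and values are
 * made up for this example; a result >= the size means no zero bit
 * was found.
 *
 *	unsigned long map[2] = { 0xffffffff, 0x00000007 };
 *	int slot = find_first_zero_bit(map, 64);	 returns 35
 *	if (slot < 64)
 *		set_bit(slot, map);			 claim the slot
 */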

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_bit(const unsigned long *addr, unsigned size)
{
	int x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
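
/*
 * Illustrative example (not part of the original header): ffz() is the
 * complement of __ffs(), i.e. ffz(x) == __ffs(~x).
 *
 *	ffz(0x0000000f)		 returns 4 (bits 0-3 set, bit 4 clear)
 *	ffz(~0UL)		 undefined, check against ~0UL first
 */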

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
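
/*
 * Illustrative note (not part of the original header): the five words
 * of @b are scanned in order and the index of the first set bit is
 * returned, so the caller is expected to keep at least one bit in the
 * bitmap set.  The priority value below is made up for this example.
 *
 *	unsigned long prio_bitmap[5] = { 0, 0, 0, 0, 0 };
 *	set_bit(110, prio_bitmap);
 *	sched_find_first_bit(prio_bitmap);	 returns 110 (word 3, bit 14)
 */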

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
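
/*
 * Illustrative example (not part of the original header): unlike
 * __ffs(), ffs() is 1-based and returns 0 for a zero argument, as the
 * libc routine does.
 *
 *	ffs(0)		 returns 0
 *	ffs(0x8)	 returns 4
 *	__ffs(0x8)	 returns 3
 */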

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
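
/*
 * Illustrative example (not part of the original header):
 *
 *	hweight32(0xf0f0f0f0)	 returns 16
 *	hweight16(0x000f)	 returns 4
 *	hweight8(0x01)		 returns 1
 */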

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit(nr,addr) \
	__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr, addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_bit((unsigned long*)addr, size, off)

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit((void*)addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */