#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

#define BIT_64(n) (U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

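/*
 * Worked example (illustrative, not from the original source): for nr == 13,
 * CONST_MASK_ADDR(13, addr) targets byte 13 >> 3 == 1 of the bitmap, and
 * CONST_MASK(13) == 1 << (13 & 7) == 0x20, so the locked byte op touches
 * exactly one byte rather than a whole long.
 */
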
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

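/*
 * A minimal usage sketch (illustrative only, guarded out of compilation;
 * 'my_flags' is a hypothetical bitmap, not part of this header):
 */
#if 0
static unsigned long my_flags[BITS_TO_LONGS(64)];

static void example_set_bits(void)
{
	set_bit(13, my_flags);		/* atomic RMW: safe against other CPUs */
	__set_bit(14, my_flags);	/* non-atomic: caller must serialize */
}
#endif
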
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* "sbb %0,%0" turns CF into 0 or -1: non-zero iff the bit was already set */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

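/*
 * A minimal lock sketch built on these primitives (illustrative only;
 * 'LOCK_BIT' and 'state' are hypothetical). Real code should prefer the
 * kernel's locking APIs over open-coding this:
 */
#if 0
#define LOCK_BIT	0
static unsigned long state;

static void example_bit_lock(void)
{
	while (test_and_set_bit_lock(LOCK_BIT, &state))
		cpu_relax();			/* spin until the 0 -> 1 transition is ours */
	/* ... critical section ... */
	clear_bit_unlock(LOCK_BIT, &state);	/* release: barrier() then atomic clear */
}
#endif
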
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		(addr[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
	int oldbit;

	/* "bt" copies the addressed bit into CF; "sbb %0,%0" makes that 0 or -1 */
	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

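/*
 * Dispatch example (illustrative): test_bit(5, addr) has a compile-time
 * constant @nr, so it reduces to the C expression in constant_test_bit();
 * test_bit(i, addr) with a runtime 'i' emits 'bt' via variable_test_bit().
 */
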
#if (defined(CONFIG_X86_GENERIC) || defined(CONFIG_GENERIC_CPU)) \
 && !defined(CONFIG_CC_OPTIMIZE_FOR_SIZE)
/*
 * Since BSF and TZCNT have sufficiently similar semantics for the purposes
 * for which we use them here, BMI-capable hardware will decode the prefixed
 * variant as 'tzcnt ...' and may execute that faster than 'bsf ...', while
 * older hardware will ignore the REP prefix and decode it as 'bsf ...'.
 */
# define BSF_PREFIX "rep;"
#else
# define BSF_PREFIX
#endif

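/* Encoding note: "rep; bsf" assembles to F3 0F BC, which is also the TZCNT encoding. */
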
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm(BSF_PREFIX "bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

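/*
 * Example (illustrative): __ffs(0x60) == 5, since bit 5 is the lowest set
 * bit of 0x60; __ffs(0) is undefined, hence the check-against-0 rule above.
 */
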
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	asm(BSF_PREFIX "bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

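/*
 * Example (illustrative): ffz(0xFF) == 8, because bits 0..7 are set and bit 8
 * is the first zero; the implementation is simply __ffs() of the complement.
 */
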
#undef BSF_PREFIX

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

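/*
 * Examples (illustrative): ffs(0) == 0, ffs(1) == 1, ffs(0x10) == 5.
 * Unlike __ffs(), positions are 1-based and 0 means "no bit set".
 */
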
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

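/*
 * Examples (illustrative): fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32;
 * i.e. fls(x) is one more than the 0-based index __fls() would return.
 */
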
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

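/*
 * Examples (illustrative): fls64(0) == 0 (bitpos stays -1, so the result is
 * -1 + 1), and fls64(1ULL << 63) == 64.
 */
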
#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */