/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)		(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)		 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)		"+m" (*(volatile char *) (x))

#define ADDR			RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

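/*
 * Example (illustrative only): for a compile-time constant nr the target
 * byte and in-byte mask are known at build time, e.g. nr == 12 selects
 * byte (12 >> 3) == 1 of the bitmap and mask (1 << (12 & 7)) == 0x10,
 * so arch_set_bit(12, addr) below can be emitted as a single
 * "lock orb $0x10,1(addr)" instead of a LOCK BTS.
 */
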
static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr) & 0xff)
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch___set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

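/*
 * Usage sketch (hypothetical caller; set_bit()/__set_bit() are the
 * <linux/bitops.h> wrappers around these arch_ helpers):
 *
 *	static DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(0, flags);	// atomic, safe against concurrent RMW
 *	__set_bit(1, flags);	// non-atomic, caller must serialize
 */
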
static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr) ^ 0xff));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;

	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
#define arch_clear_bit_unlock_is_negative_byte                                 \
	arch_clear_bit_unlock_is_negative_byte

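/*
 * Intended pattern, sketched with a hypothetical flag layout: release a
 * lock bit and simultaneously learn whether bit 7 of the *byte* holding
 * it is still set (read back from the sign flag of the byte-wide AND):
 *
 *	if (arch_clear_bit_unlock_is_negative_byte(LOCK_BIT, &word))
 *		wake_up_waiters();	// waiters flag lives in bit 7
 */
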
static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return arch_test_and_set_bit(nr, addr);
}

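/*
 * Sketch of the classic bit-spinlock pattern built on these primitives
 * (MY_LOCK_BIT and word are placeholders, for illustration only):
 *
 *	while (arch_test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();			// spin until 0 -> 1
 *	... critical section ...
 *	arch_clear_bit_unlock(MY_LOCK_BIT, &word);
 */
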
static __always_inline bool
arch___test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	/*
	 * Because this is a plain access, we need to disable KCSAN here to
	 * avoid double instrumentation via instrumented bitops.
	 */
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

#define arch_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

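/*
 * Dispatch example (illustrative): arch_test_bit(3, addr) has a
 * compile-time constant nr and folds to the plain load and mask in
 * constant_test_bit(), while arch_test_bit(i, addr) with a runtime i
 * emits a BT instruction via variable_test_bit().
 */
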
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

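/*
 * Quick reference for the three helpers above (results are 0-based bit
 * indices; inputs are arbitrary examples):
 *
 *	__ffs(0x60) == 5	// 0x60 == 0b01100000, lowest set bit
 *	ffz(0x0f)   == 4	// lowest clear bit
 *	__fls(0x60) == 6	// highest set bit
 */
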
#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */