/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

26333576 | 9 | /** |
cf3ee3c8 | 10 | * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock |
26333576 NP |
11 | * @nr: Bit to set |
12 | * @addr: Address to count from | |
13 | * | |
61e02392 WD |
14 | * This operation is atomic and provides acquire barrier semantics if |
15 | * the returned value is 0. | |
26333576 NP |
16 | * It can be used to implement bit locks. |
17 | */ | |
cf3ee3c8 MR |
18 | static __always_inline int |
19 | arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) | |
84c65911 WD |
20 | { |
21 | long old; | |
22 | unsigned long mask = BIT_MASK(nr); | |
23 | ||
24 | p += BIT_WORD(nr); | |
25 | if (READ_ONCE(*p) & mask) | |
26 | return 1; | |
27 | ||
0f613bfa | 28 | old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); |
84c65911 WD |
29 | return !!(old & mask); |
30 | } | |
31 | ||
26333576 NP |
32 | |
33 | /** | |
cf3ee3c8 | 34 | * arch_clear_bit_unlock - Clear a bit in memory, for unlock |
26333576 NP |
35 | * @nr: the bit to set |
36 | * @addr: the address to start counting from | |
37 | * | |
38 | * This operation is atomic and provides release barrier semantics. | |
39 | */ | |
cf3ee3c8 MR |
40 | static __always_inline void |
41 | arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) | |
84c65911 WD |
42 | { |
43 | p += BIT_WORD(nr); | |
0f613bfa | 44 | raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); |
84c65911 | 45 | } |
26333576 NP |
46 | |
/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	/*
	 * Deliberately non-atomic read-modify-write: this is safe only
	 * under the documented invariant that every other bit in the word
	 * is protected by the lock bit being cleared, so no concurrent
	 * writer can race with the plain load below.
	 */
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	/* Release store publishes the critical section's prior writes. */
	raw_atomic_long_set_release((atomic_long_t *)p, old);
}
68 | ||
69 | /** | |
cf3ee3c8 MR |
70 | * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom |
71 | * byte is negative, for unlock. | |
84c65911 WD |
72 | * @nr: the bit to clear |
73 | * @addr: the address to start counting from | |
74 | * | |
75 | * This is a bit of a one-trick-pony for the filemap code, which clears | |
76 | * PG_locked and tests PG_waiters, | |
77 | */ | |
cf3ee3c8 MR |
78 | #ifndef arch_clear_bit_unlock_is_negative_byte |
79 | static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, | |
80 | volatile unsigned long *p) | |
84c65911 WD |
81 | { |
82 | long old; | |
83 | unsigned long mask = BIT_MASK(nr); | |
84 | ||
85 | p += BIT_WORD(nr); | |
0f613bfa | 86 | old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); |
84c65911 WD |
87 | return !!(old & BIT(7)); |
88 | } | |
cf3ee3c8 | 89 | #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte |
84c65911 | 90 | #endif |

#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */