#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>

#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
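
/*
 * Worked example (illustrative, not part of the original header):
 * on a 64-bit machine (BITS_PER_LONG == 64), bit 71 lives in word
 * BITOP_WORD(71) == 1 under mask BITOP_MASK(71) == 1UL << 7,
 * i.e. bit 7 of addr[1].
 */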

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
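
/*
 * Hashing sketch (illustrative, not part of the original header;
 * assumes L1_CACHE_BYTES == 32): addresses 0x1000 and 0x101c share a
 * cacheline and hash to the same lock, (0x1000/32) & 3 == 0, while
 * 0x1020 hashes to the next slot, (0x1020/32) & 3 == 1.  Bits in one
 * cacheline therefore always contend on a single spinlock.
 */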

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
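/* On UP there is no other CPU to race with, so masking local
 * interrupts around the read-modify-write is enough to make the
 * bitops below atomic; no spinlock is needed.
 */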
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
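
/*
 * Usage sketch (illustrative, not part of the original header):
 * a two-word bitmap holds 128 bits on a 64-bit machine.
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(0, map);	(sets bit 0 of map[0])
 *	set_bit(71, map);	(sets bit 7 of map[1])
 */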

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
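
/*
 * Lock-release sketch (illustrative, not part of the original header;
 * LOCK_BIT is a caller-defined bit number): the barrier orders the
 * critical section before the release, as the comment above requires.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);
 */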

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on other architectures.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
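
/*
 * Bit-lock sketch (illustrative, not part of the original header;
 * LOCK_BIT is a caller-defined bit number): a return value of 0 means
 * the caller observed the bit clear and now owns the lock.
 *
 *	while (test_and_set_bit(LOCK_BIT, &word))
 *		cpu_relax();
 */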

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
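
/*
 * Consume-a-flag sketch (illustrative, not part of the original header;
 * PENDING_BIT and handle_pending() are hypothetical): exactly one
 * caller sees the old value as set, so the work runs once.
 *
 *	if (test_and_clear_bit(PENDING_BIT, &state))
 *		handle_pending();
 */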

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */