Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __ARCH_M68K_ATOMIC__ |
2 | #define __ARCH_M68K_ATOMIC__ | |
3 | ||
1da177e4 | 4 | |
2856f5e3 | 5 | #include <asm/system.h> |
1da177e4 LT |
6 | |
7 | /* | |
8 | * Atomic operations that C can't guarantee us. Useful for | |
9 | * resource counting etc.. | |
10 | */ | |
11 | ||
12 | /* | |
13 | * We do not have SMP m68k systems, so we don't have to deal with that. | |
14 | */ | |
15 | ||
/*
 * Counter wrapped in a struct so atomic values are type-checked and
 * never mixed up with plain ints by accident.
 */
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

/*
 * Plain loads/stores: a single aligned 32-bit access is atomic on m68k,
 * and this file only targets UP (see comment above), so no locking needed.
 * Note: the argument of atomic_set() is parenthesized so expressions such
 * as `a | b` expand safely (macro-hygiene fix; was bare `i`).
 */
#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
21 | ||
/*
 * Atomically add @i to @v.  A single memory-destination addl suffices:
 * one instruction cannot be interrupted, and this file is UP-only.
 * "id" allows an immediate or a data register as the source operand.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}
26 | ||
/*
 * Atomically subtract @i from @v — single memory-destination subl,
 * mirroring atomic_add() above.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}
31 | ||
/*
 * Atomically increment @v.  addql (add-quick) encodes the constant 1
 * in the opcode itself, so no source operand is needed.
 */
static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
36 | ||
/*
 * Atomically decrement @v, using subql (subtract-quick) with the
 * constant folded into the opcode.
 */
static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
41 | ||
/*
 * Atomically decrement @v and return true iff the result is zero.
 * seq materializes the Z condition code set by subql into @c
 * (0xff if zero, 0x00 otherwise), hence the `c != 0` normalization.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
48 | ||
/*
 * Atomically increment @v and return true iff the result is zero
 * (i.e. the counter wrapped to 0).  Same seq trick as
 * atomic_dec_and_test() above.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
55 | ||
56 | #ifdef CONFIG_RMW_INSNS | |
7b61fcda | 57 | |
1da177e4 LT |
/*
 * Atomically add @i to @v and return the new value, using a casl
 * (compare-and-swap) retry loop:
 *   1. load the old value (tmp, seeded from atomic_read below),
 *   2. compute old + i into t,
 *   3. casl swaps t in only if *v still equals tmp; on mismatch it
 *      reloads tmp and we retry at 1b.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
71 | ||
/*
 * Atomically subtract @i from @v and return the new value.
 * Same casl compare-and-swap retry loop as atomic_add_return(),
 * with subl in place of addl.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
7b61fcda RZ |
85 | |
/* With CAS hardware available, defer to the generic cmpxchg()/xchg(). */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
88 | ||
1da177e4 | 89 | #else /* !CONFIG_RMW_INSNS */ |
7b61fcda | 90 | |
1da177e4 LT |
91 | static inline int atomic_add_return(int i, atomic_t * v) |
92 | { | |
93 | unsigned long flags; | |
94 | int t; | |
95 | ||
96 | local_irq_save(flags); | |
97 | t = atomic_read(v); | |
98 | t += i; | |
99 | atomic_set(v, t); | |
100 | local_irq_restore(flags); | |
101 | ||
102 | return t; | |
103 | } | |
104 | ||
105 | static inline int atomic_sub_return(int i, atomic_t * v) | |
106 | { | |
107 | unsigned long flags; | |
108 | int t; | |
109 | ||
110 | local_irq_save(flags); | |
111 | t = atomic_read(v); | |
112 | t -= i; | |
113 | atomic_set(v, t); | |
114 | local_irq_restore(flags); | |
115 | ||
116 | return t; | |
117 | } | |
7b61fcda RZ |
118 | |
119 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | |
120 | { | |
121 | unsigned long flags; | |
122 | int prev; | |
123 | ||
124 | local_irq_save(flags); | |
125 | prev = atomic_read(v); | |
126 | if (prev == old) | |
127 | atomic_set(v, new); | |
128 | local_irq_restore(flags); | |
129 | return prev; | |
130 | } | |
131 | ||
132 | static inline int atomic_xchg(atomic_t *v, int new) | |
133 | { | |
134 | unsigned long flags; | |
135 | int prev; | |
136 | ||
137 | local_irq_save(flags); | |
138 | prev = atomic_read(v); | |
139 | atomic_set(v, new); | |
140 | local_irq_restore(flags); | |
141 | return prev; | |
142 | } | |
143 | ||
1da177e4 LT |
144 | #endif /* !CONFIG_RMW_INSNS */ |
145 | ||
/* inc/dec variants that also return the new value, built on *_return. */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
148 | ||
/*
 * Atomically subtract @i from @v and return true iff the result is
 * zero (seq captures the Z flag set by subl).
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
	return c != 0;
}
155 | ||
/*
 * Atomically add @i to @v and return true iff the result is negative
 * (smi captures the N flag set by addl).
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
	return c != 0;
}
162 | ||
/*
 * Atomically clear the bits in @mask from *@v: single memory-destination
 * andl with the complemented mask.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}
167 | ||
/* Atomically set the bits in @mask in *@v via memory-destination orl. */
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
172 | ||
2856f5e3 MD |
173 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) |
174 | { | |
175 | int c, old; | |
176 | c = atomic_read(v); | |
177 | for (;;) { | |
178 | if (unlikely(c == (u))) | |
179 | break; | |
180 | old = atomic_cmpxchg((v), c, c + (a)); | |
181 | if (likely(old == c)) | |
182 | break; | |
183 | c = old; | |
184 | } | |
185 | return c != (u); | |
186 | } | |
187 | ||
8426e1f6 NP |
/* Increment @v unless it is zero; non-zero return means it incremented. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
189 | ||
1da177e4 LT |
/*
 * Atomic operations are already serializing on UP m68k, so the
 * before/after hooks only need to stop compiler reordering.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
195 | ||
d3cb4871 | 196 | #include <asm-generic/atomic.h> |
1da177e4 | 197 | #endif /* __ARCH_M68K_ATOMIC__ */