/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

/* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
 * of spinlocks to get a bit of performance...
 *
 * See arch/sparc/lib/atomic32.c for implementation.
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
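
/* Purely illustrative sketch of the hash-and-spinlock emulation described
 * above; it is NOT the actual arch/sparc/lib/atomic32.c code.  The table
 * size, the hash shift and the CMPXCHG_HASH/cmpxchg_hash_lock names are
 * made up here, and the locks are assumed to be initialised elsewhere:
 *
 *	#define CMPXCHG_HASH_SIZE	4
 *	static spinlock_t cmpxchg_hash_lock[CMPXCHG_HASH_SIZE];
 *	#define CMPXCHG_HASH(addr) \
 *		(&cmpxchg_hash_lock[((unsigned long)(addr) >> 8) % CMPXCHG_HASH_SIZE])
 *
 *	unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new_)
 *	{
 *		unsigned long flags;
 *		u32 prev;
 *
 *		spin_lock_irqsave(CMPXCHG_HASH(ptr), flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new_;
 *		spin_unlock_irqrestore(CMPXCHG_HASH(ptr), flags);
 *
 *		return (unsigned long)prev;
 *	}
 */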

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
	case 4:
		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr,o,n) ({						\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})
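
/* Usage sketch (illustrative only; 'seq' is a made-up example variable,
 * not part of this header).  A lock-free increment retries until the
 * compare-and-exchange observes an unchanged value:
 *
 *	static u32 seq;
 *	u32 old;
 *
 *	do {
 *		old = seq;
 *	} while (cmpxchg(&seq, old, old + 1) != old);
 */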

#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
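/* atomic_add_unless(v, a, u): atomically add 'a' to 'v' unless 'v' is
 * already 'u'.  Returns non-zero if the add was carried out, zero otherwise
 * (the usual Linux atomic_add_unless() semantics).
 */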
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
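
/* Typical use of atomic_dec_and_test() (illustrative only; 'foo' and
 * foo_destroy() are made-up names, not part of this header):
 *
 *	if (atomic_dec_and_test(&foo->refcnt))
 *		foo_destroy(foo);
 *
 * Exactly one CPU, the one that drops the last reference, sees the counter
 * reach zero and performs the cleanup.
 */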

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)	{ (i) }
#define atomic24_read(v)	((v)->counter)
#define atomic24_set(v, i)	(((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
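
/* Worked example (illustrative): atomic24_set(v, 5) stores 5 << 8 == 0x500
 * in the word.  While a CPU owns the embedded lock it keeps a non-zero
 * value in the low byte, so atomic24_read() below spins until that byte
 * reads as zero and then returns 0x500 >> 8 == 5.  Because the shift is an
 * arithmetic shift of a signed int, negative counters survive the round
 * trip: -1 is stored as 0xffffff00 and read back as -1.
 */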

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	while(ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

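	/* Here, and in __atomic24_sub() below, the pointer and the operand
	 * are pinned to %g1 and %g2 because that is where the out-of-line
	 * helpers in arch/sparc/lib/atomic.S expect them; tmp1-tmp3 are
	 * listed as outputs only so GCC knows %g3, %g4 and %g7 are
	 * clobbered across the call.
	 */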
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */