/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;
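
/* The counter is wrapped in a struct rather than exposed as a bare
 * int so that callers cannot update it with plain arithmetic and must
 * go through the accessors below; volatile forces the compiler to
 * perform a real memory access each time the value is touched.
 */
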
#ifdef __KERNEL__

#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)          ((v)->counter)

#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)           ((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)           ((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)    (__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)    (__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)

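/* All of the arithmetic ops above are thin wrappers around the
 * out-of-line __atomic_add_return(); the _return variants hand back
 * the post-operation value, so atomic_inc_return() on a counter
 * holding zero yields 1.  A sketch with a hypothetical event counter:
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_events);
 *	printk("%d events so far\n", atomic_read(&nr_events));
 */
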
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

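/* The canonical use of atomic_dec_and_test() is reference counting:
 * whichever path drops the last reference sees the count reach zero
 * and tears the object down.  A sketch with a hypothetical refcounted
 * structure:
 *
 *	struct foo {
 *		atomic_t refcnt;
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */
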
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)  ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler; see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is to embed the spin lock byte within the word,
 * using the low byte so signedness is easily retained via a quick
 * arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
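
/* A worked example of the encoding: ATOMIC24_INIT(1) stores
 * 0x00000100 (1 << 8), while ATOMIC24_INIT(-1) stores 0xffffff00.
 * atomic24_read() shifts right by 8, and because the shift is
 * arithmetic on a signed int the sign bit propagates back down,
 * recovering 1 and -1 once the lock byte is observed clear.
 */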

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* A non-zero low byte means another CPU holds the embedded
	 * spin lock; keep re-reading until it is released. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

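/* Both wrappers below pin their operands to fixed global registers
 * (judging by the bindings, %g1 carries the pointer while %g2 carries
 * the value in and the result out), because that is where the
 * out-of-line assembler in arch/sparc/lib/atomic.S expects them.
 * %g3, %g4 and %g7 are declared as outputs only so the compiler
 * knows the helpers may clobber them.
 */
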
static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

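/* A sketch of how the semaphore code (the main remaining user) can
 * sit on top of these.  A hypothetical down()-style fast path might
 * look like:
 *
 *	if (atomic24_dec_return(&sem->count) < 0)
 *		enter the sleeping slow path;
 *
 * with 24 usable bits leaving plenty of headroom for any realistic
 * number of waiters.
 */
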
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */