/* arch/h8300/include/asm/atomic.h */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))

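/*
 * Example usage (illustrative sketch only; "active_users" is a
 * hypothetical counter, not part of this header):
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_users, 1);
 *	n = atomic_read(&active_users);
 */
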
#include <asm/system.h>
#include <linux/kernel.h>

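/*
 * atomic_add_return - add integer and return result
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value.  Atomicity is
 * provided by disabling interrupts around the read-modify-write.
 */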
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = v->counter += i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_add(i, v) atomic_add_return(i, v)
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

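/*
 * atomic_sub_return - subtract integer and return result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value.
 */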
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = v->counter -= i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_sub(i, v) atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

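/*
 * atomic_inc_return - increment and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */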
static __inline__ int atomic_inc_return(atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = ++v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_inc(v) atomic_inc_return(v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

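/*
 * atomic_dec_return - decrement and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */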
static __inline__ int atomic_dec_return(atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = --v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_dec(v) atomic_dec_return(v)

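/*
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */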
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = --v->counter;
	local_irq_restore(flags);
	return ret == 0;
}

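/*
 * atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: value to store if @v currently holds @old
 *
 * Atomically sets @v to @new if it currently holds @old.  Returns the
 * value @v held before the operation, so the exchange succeeded iff
 * the return value equals @old.
 */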
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);
	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

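/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */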
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);
	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

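/*
 * Example usage (illustrative sketch; "obj" and its "refcnt" field are
 * hypothetical): take a reference only if the object is still live,
 * i.e. its refcount has not already dropped to zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */
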
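/*
 * atomic_clear_mask - atomically clear bits in a word
 * @mask: bits to clear
 * @v: pointer to the word
 *
 * The asm saves the condition code register into r1l, sets the I bit
 * (orc #0x80,ccr) to mask interrupts, performs the read-modify-write
 * on *@v, then restores the saved CCR.
 */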
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "and.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "+m" (*v) : "g" (~(mask)) : "er0", "er1");
}

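/*
 * atomic_set_mask - atomically set bits in a word
 * @mask: bits to set
 * @v: pointer to the word
 *
 * Same interrupt-masking sequence as atomic_clear_mask(), but ORs
 * @mask into *@v instead of ANDing with its complement.
 */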
static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "or.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "+m" (*v) : "g" (mask) : "er0", "er1");
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

#include <asm-generic/atomic-long.h>
#endif /* __ARCH_H8300_ATOMIC__ */