X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=include%2Fasm-xtensa%2Fatomic.h;h=b3b23540f14d2c5087e4b8d09d519378e73e726a;hb=6c5d523826dc639df709ed0f88c5d2ce25379652;hp=d72bcb32ba4f74ce3d3863559f14170d8937d050;hpb=d2f6409584e2c62ffad81690562330ff3bf4a458;p=linux-block.git

diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index d72bcb32ba4f..b3b23540f14d 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -13,7 +13,6 @@
 #ifndef _XTENSA_ATOMIC_H
 #define _XTENSA_ATOMIC_H

-#include <linux/config.h>
 #include <linux/stringify.h>

 typedef struct { volatile int counter; } atomic_t;
@@ -22,7 +21,7 @@ typedef struct { volatile int counter; } atomic_t;
 #include <asm/processor.h>
 #include <asm/system.h>

-#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i)	{ (i) }

 /*
  * This Xtensa implementation assumes that the right mechanism
@@ -66,7 +65,7 @@ typedef struct { volatile int counter; } atomic_t;
  *
  * Atomically adds @i to @v.
  */
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t * v)
 {
 	unsigned int vval;

@@ -90,7 +89,7 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
  *
  * Atomically subtracts @i from @v.
  */
-extern __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned int vval;

@@ -111,7 +110,7 @@ extern __inline__ void atomic_sub(int i, atomic_t *v)
  * We use atomic_{add|sub}_return to define other functions.
  */

-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned int vval;

@@ -130,7 +129,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
 	return vval;
 }

-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned int vval;

@@ -223,8 +222,36 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
  */
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

-extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int all_f = -1;
 	unsigned int vval;
@@ -243,7 +270,7 @@ extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	);
 }

-extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int vval;

@@ -266,6 +293,7 @@ extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()

+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */

 #endif /* _XTENSA_ATOMIC_H */
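
A note on the ATOMIC_INIT() hunk: the old body ( (atomic_t) { (i) } ) is a compound literal, which is not a constant expression in C, so it could not portably initialize an atomic_t with static storage duration; the plain brace form can, and it matches what other architectures define. A minimal sketch of the use case the new form supports (the variable name is illustrative):

	static atomic_t ref_count = ATOMIC_INIT(1);	/* valid static initializer */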
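
The atomic_add_unless() added above is a standard cmpxchg retry loop: read the counter, give up if it holds the forbidden value @u, otherwise try to install c + a and retry whenever another CPU raced in between; atomic_inc_not_zero() is the refcounting special case. The standalone userspace program below mirrors the same technique with GCC's __atomic builtins; it is an illustrative analogue (my_atomic_t and my_atomic_add_unless are invented names), not the kernel implementation:

	/* Userspace analogue of the atomic_add_unless() retry loop. */
	#include <stdio.h>

	typedef struct { volatile int counter; } my_atomic_t;

	/* Add 'a' to 'v' unless its current value is 'u'.
	 * Returns nonzero iff the add happened. */
	static int my_atomic_add_unless(my_atomic_t *v, int a, int u)
	{
		int c = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

		for (;;) {
			if (c == u)		/* forbidden value: leave it alone */
				break;
			/* Try to swap c -> c + a; on failure c is refreshed
			 * with the value another thread stored, and we retry. */
			if (__atomic_compare_exchange_n(&v->counter, &c, c + a, 0,
							__ATOMIC_SEQ_CST,
							__ATOMIC_SEQ_CST))
				break;
		}
		return c != u;
	}

	int main(void)
	{
		my_atomic_t refs = { 1 };

		/* Like atomic_inc_not_zero(): succeeds while the count is nonzero. */
		printf("inc from 1 -> %d (counter now %d)\n",
		       my_atomic_add_unless(&refs, 1, 0), refs.counter);

		refs.counter = 0;
		printf("inc from 0 -> %d (counter now %d)\n",
		       my_atomic_add_unless(&refs, 1, 0), refs.counter);
		return 0;
	}

On failure, __atomic_compare_exchange_n() reloads the observed value into c, which is exactly the role of the old = atomic_cmpxchg(...); c = old; sequence in the patch.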
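
The trailing #include <asm-generic/atomic.h> picks up the generic layer introduced alongside this change, which builds atomic_long_t operations on top of the architecture's atomic_t (or atomic64_t on 64-bit), so arch headers only need to provide the native word-size primitives. A sketch of what that layer gives callers, assuming the asm-generic/atomic.h of this era (kernel context, not standalone; nr_bytes and account_bytes are illustrative):

	static atomic_long_t nr_bytes = ATOMIC_LONG_INIT(0);

	static inline void account_bytes(long n)
	{
		atomic_long_add(n, &nr_bytes);	/* forwards to atomic_add() on 32-bit xtensa */
	}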