ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock
[linux-2.6-block.git] arch/arc/include/asm/atomic.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)		((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i)	(((v)->counter) = (i))

#ifdef CONFIG_ISA_ARCV2
#define PREFETCHW	"	prefetchw   [%1]	\n"
#else
#define PREFETCHW
#endif

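/*
 * Why the PREFETCHW: per the commit subject above (STAR 9000837815),
 * concurrent LLOCK/SCOND from multiple cores can livelock in hardware.
 * PREFETCHW is a prefetch-with-intent-to-write, so issuing it ahead of
 * LLOCK brings the line into the cache in a writable/exclusive state,
 * making the subsequent LL/SC sequence far more likely to succeed.
 */
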
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	PREFETCHW							\
	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	PREFETCHW							\
	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return temp;							\
}

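/*
 * For reference, once instantiated via ATOMIC_OPS(add, +=, add) below,
 * ATOMIC_OP_RETURN expands to roughly the following (illustrative sketch
 * of the generated function, PREFETCHW shown in its ARCv2 form):
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned int temp;
 *
 *		smp_mb();
 *		__asm__ __volatile__(
 *		"1:				\n"
 *		"	prefetchw   [%1]	\n"
 *		"	llock   %0, [%1]	\n"
 *		"	add     %0, %0, %2	\n"
 *		"	scond   %0, [%1]	\n"
 *		"	bnz     1b		\n"
 *		: "=&r"(temp)
 *		: "r"(&v->counter), "ir"(i)
 *		: "cc");
 *		smp_mb();
 *
 *		return temp;
 *	}
 */
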
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif

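/*
 * Concretely, the lost-update race that a locked atomic_set() prevents
 * (illustrative interleaving):
 *
 *	CPU0: emulated atomic_add(n, v)		CPU1: unlocked atomic_set(v, i)
 *	atomic_ops_lock(flags);
 *	temp = v->counter;
 *						v->counter = i;
 *	v->counter = temp + n;			// CPU1's store is lost
 *	atomic_ops_unlock(flags);
 */
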
/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif	/* !CONFIG_ARC_HAS_LLSC */

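/*
 * For comparison, the lock-based ATOMIC_OP_RETURN above expands (again
 * illustratively, for ATOMIC_OPS(add, +=, add) below) to:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags, temp;
 *
 *		atomic_ops_lock(flags);		// irq-off (UP) / spinlock (SMP)
 *		temp = v->counter;
 *		temp += i;
 *		v->counter = temp;
 *		atomic_ops_unlock(flags);	// lock/unlock imply the barriers
 *
 *		return temp;
 *	}
 */
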
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

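/*
 * The instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return() and atomic_and(). A small usage
 * sketch (hypothetical caller code):
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &cnt);				// cnt = 3
 *	if (atomic_sub_return(3, &cnt) == 0)		// cnt = 0, branch taken
 *		...
 *	atomic_clear_mask(0x1, &cnt);			// cnt &= ~0x1
 */
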
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

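/*
 * Typical use is refcount-style "get if still live" (illustrative only):
 *
 *	if (atomic_inc_not_zero(&obj->refcnt)) {
 *		... safe to use obj; drop with atomic_dec() later ...
 *	}
 *
 * atomic_inc_not_zero() only takes the reference when the count has not
 * already dropped to zero; it is built on __atomic_add_unless() via the
 * generic atomic_add_unless() in <linux/atomic.h>.
 */
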
#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */