/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_fetch_##op((unsigned long *)&v->counter, i);		\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	smp_mb();							\
	return _atomic_fetch_##op((unsigned long *)&v->counter, i);	\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

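/*
 * Illustrative expansion (not part of the original header): each
 * ATOMIC_OPS(op) invocation above generates both a void form and a
 * value-returning form, backed by the same out-of-line helper.  For
 * example, ATOMIC_OPS(and) expands to roughly:
 *
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		_atomic_fetch_and((unsigned long *)&v->counter, i);
 *	}
 *	static inline int atomic_fetch_and(int i, atomic_t *v)
 *	{
 *		smp_mb();
 *		return _atomic_fetch_and((unsigned long *)&v->counter, i);
 *	}
 */
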
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();
	return _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}

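/*
 * Illustrative use (not from the original header): atomic_add_return()
 * is a common way to hand out unique values, e.g. with a hypothetical
 * counter:
 *
 *	static atomic_t next_ticket = ATOMIC_INIT(0);
 *	int ticket = atomic_add_return(1, &next_ticket);
 */
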
/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}

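/*
 * Illustrative use (not from the original header): the generic
 * <linux/atomic.h> layer wraps this primitive as atomic_add_unless()
 * and atomic_inc_not_zero(), e.g. taking a reference only while the
 * count is still live, for a hypothetical object "obj":
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		... old count was non-zero, reference taken ...
 */
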
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}

/* A 64-bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

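/*
 * Illustrative note (not from the original header): adding zero leaves
 * the value unchanged, so the read amounts to:
 *
 *	long long val = _atomic64_xchg_add((long long *)&v->counter, 0);
 *	... val is a consistent snapshot of both 32-bit halves ...
 */
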
/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OPS(op)						\
long long _atomic64_fetch_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	_atomic64_fetch_##op(&v->counter, i);				\
}									\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{									\
	smp_mb();							\
	return _atomic64_fetch_##op(&v->counter, i);			\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();
	return _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

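/*
 * Illustrative use (not from the original header): the classic
 * refcounting pattern built from the helpers above, for a hypothetical
 * object with an atomic64_t field "refs":
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;		... object already being torn down ...
 *	...
 *	if (atomic64_dec_and_test(&obj->refs))
 *		free_obj(obj);		... last reference dropped ...
 */
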
#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

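/*
 * Illustrative sketch (not the actual implementation): a hashed-lock
 * scheme like this one typically picks a lock word by folding low-order
 * address bits into an index, along the lines of:
 *
 *	int *lock = &atomic_locks[((unsigned long)v >> 2) &
 *				  (ATOMIC_HASH_SIZE - 1)];
 *
 * The real mapping is provided by __atomic_hashed_lock(), declared below.
 */
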
/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					    int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						    int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */