ARC: add compiler barrier to LLSC based cmpxchg
[linux-2.6-block.git] / arch/arc/include/asm/cmpxchg.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	return prev;
}
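
/*
 * For reference, an illustrative C sketch of the llock/scond retry loop
 * above. load_locked() and store_conditional() are hypothetical stand-ins
 * for the two instructions (not real kernel APIs), with store_conditional()
 * returning non-zero if the lock flag was lost before the store:
 *
 *	do {
 *		prev = load_locked(ptr);	// llock: load, arm lock flag
 *		if (prev != expected)
 *			break;			// brne: mismatch, bail out
 *	} while (store_conditional(ptr, new));	// scond failed: bnz 1b retries
 *	return prev;
 */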

#else

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))
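
/*
 * Typical caller pattern (illustrative only) - retry until the update
 * lands atomically; the variable names here are hypothetical:
 *
 *	unsigned long cur = *counter;
 *	for (;;) {
 *		unsigned long old = cmpxchg(counter, cur, cur + 1);
 *		if (old == cur)
 *			break;		// our update won
 *		cur = old;		// lost the race, retry from new value
 *	}
 */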

/*
 * When not supported natively (no LLSC), ARC cmpxchg() uses atomic_ops_lock
 * (UP/SMP) just to guarantee semantics.
 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
 * which also happens to be atomic_ops_lock.
 *
 * Thus despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
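
/*
 * Example use (illustrative sketch, not the generic kernel copy): the
 * classic atomic_add_unless() pattern built on atomic_cmpxchg() - add
 * @a to @v unless it currently holds @u:
 *
 *	int c = atomic_read(v);
 *	while (c != u) {
 *		int old = atomic_cmpxchg(v, c, c + a);
 *		if (old == c)
 *			break;		// added successfully
 *		c = old;		// raced with someone, re-check
 *	}
 */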

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))
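
/*
 * Semantically, EX swaps a register with memory in a single atomic step,
 * i.e. morally (illustrative only - the two C statements below are NOT
 * atomic on their own):
 *
 *	unsigned long tmp = *(volatile unsigned long *)ptr;
 *	*(volatile unsigned long *)ptr = val;
 *	return tmp;
 */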

/*
 * On ARC700, the EX insn is inherently atomic, so by default "vanilla" xchg()
 * needs no locking. However there's a quirk.
 * ARC lacks a native CMPXCHG, so it is emulated (see above) using external
 * locking - incidentally it "reuses" the same atomic_ops_lock used by the
 * atomic APIs.
 * Now, llist code uses cmpxchg() and xchg() on the same data, so xchg() needs
 * to abide by the same serializing rules, and thus ends up using
 * atomic_ops_lock as well.
 *
 * This however is only relevant if SMP and/or ARC lacks LLSC
 *	if (UP or LLSC)
 *		xchg doesn't need serialization
 *	else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
 *		xchg needs serialization
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
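
/*
 * Why the shared lock matters (illustrative llist-style scenario, names
 * hypothetical): one CPU grabs the whole list with xchg() while another
 * pushes with cmpxchg() on the same head:
 *
 *	// CPU A: detach the entire list
 *	first = xchg(&head, NULL);
 *
 *	// CPU B: push a node, racing with A
 *	do {
 *		node->next = head;
 *	} while (cmpxchg(&head, node->next, node) != node->next);
 *
 * On SMP without LLSC, if xchg() bypassed atomic_ops_lock while cmpxchg()
 * held it, B's read-compare-write could interleave with A's swap.
 */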

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, the following definition would seem
 * incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *	   is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *	   from a different context could clobber them. atomic_xchg() itself
 *	   would be 1 insn, so it can't be clobbered by others. Thus no
 *	   serialization is required when atomic_xchg is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
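
/*
 * Example use (illustrative, names hypothetical): hand off a pending
 * state in one shot, observing what was there before:
 *
 *	int prev_state = atomic_xchg(&work->state, WORK_IDLE);
 *	if (prev_state == WORK_QUEUED)
 *		process(work);
 */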

#endif /* __ASM_ARC_CMPXCHG_H */