/* arch/mips/include/asm/barrier.h */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>
#include <asm/sync.h>

/* Emit an unconditional full completion barrier (SYNC). */
static inline void __sync(void)
{
	asm volatile(__SYNC(full, always) ::: "memory");
}

/* Order prior loads against later loads. */
static inline void rmb(void)
{
	asm volatile(__SYNC(rmb, always) ::: "memory");
}
#define rmb rmb

/* Order prior stores against later stores. */
static inline void wmb(void)
{
	asm volatile(__SYNC(wmb, always) ::: "memory");
}
#define wmb wmb
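
/*
 * Usage sketch (illustrative; the descriptor layout is hypothetical): the
 * mandatory wmb()/rmb() pair orders accesses to memory shared with a device,
 * where the smp_*() variants may be too weak:
 *
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	wmb();				// publish the descriptor body...
 *	desc->status = DESC_HW_OWNED;	// ...before handing it to hardware
 *
 * The reading side pairs this with rmb() between loading ->status and
 * loading ->addr/->len.
 */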

#define fast_mb()	__sync()

/*
 * Flush pending writes by reading from uncached (CKSEG1) space; on the CPUs
 * this targets, such a load completes only once the write buffer has drained.
 */
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# ifdef CONFIG_SGI_IP28
#  define fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"sync\n\t"			\
		"lw	$0,%0\n\t"		\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1ADDR(0x1fa00004))	\
		: "memory")
# else
#  define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
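
/*
 * Usage sketch (illustrative; register names are hypothetical): iob() is
 * placed after I/O stores that must reach the device before the CPU carries
 * on, e.g. before busy-waiting on a status register:
 *
 *	writel(CMD_RESET, ioaddr + REG_CMD);
 *	iob();			// force the write past the write buffer
 *	while (readl(ioaddr + REG_STATUS) & STATUS_BUSY)
 *		cpu_relax();
 */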

#if defined(CONFIG_WEAK_ORDERING)
# define __smp_mb()	__sync()
# define __smp_rmb()	rmb()
# define __smp_wmb()	wmb()
#else
# define __smp_mb()	barrier()
# define __smp_rmb()	barrier()
# define __smp_wmb()	barrier()
#endif
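
/*
 * Usage sketch (illustrative; "data" and "ready" are hypothetical): the
 * __smp_*() definitions above are picked up by <asm-generic/barrier.h> to
 * form smp_mb()/smp_rmb()/smp_wmb(), which pair across CPUs in the usual
 * producer/consumer pattern:
 *
 *	CPU 0				CPU 1
 *	data = 42;			while (!READ_ONCE(ready))
 *	smp_wmb();				cpu_relax();
 *	WRITE_ONCE(ready, 1);		smp_rmb();
 *					BUG_ON(data != 42);
 *
 * On strongly-ordered configurations these reduce to barrier(), a
 * compiler-only barrier.
 */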

/*
 * When LL/SC does imply order, it must also be a compiler barrier to prevent
 * the compiler from reordering where the CPU will not. When it does not imply
 * order, the compiler is also free to reorder across the LL/SC loop and
 * ordering will be done by smp_llsc_mb() and friends.
 */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
# define __WEAK_LLSC_MB		sync
# define smp_llsc_mb() \
	__asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : : "memory")
# define __LLSC_CLOBBER
#else
# define __WEAK_LLSC_MB
# define smp_llsc_mb()		do { } while (0)
# define __LLSC_CLOBBER		"memory"
#endif
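
/*
 * Usage sketch (simplified from the pattern the MIPS atomics use; constraints
 * abbreviated): an LL/SC loop passes __LLSC_CLOBBER as its clobber list and
 * is followed by smp_llsc_mb(), so the barrier is only paid for on cores
 * that reorder beyond the LL/SC sequence:
 *
 *	int tmp;
 *
 *	__asm__ __volatile__(
 *	"1:	ll	%0, %1		\n"
 *	"	addiu	%0, %0, 1	\n"
 *	"	sc	%0, %1		\n"
 *	"	beqz	%0, 1b		\n"
 *	: "=&r" (tmp), "+m" (counter)
 *	: : __LLSC_CLOBBER);
 *	smp_llsc_mb();
 */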

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
					    ".set arch=octeon\n\t"	\
					    "syncw\n\t"			\
					    ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

/*
 * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
 * a completion barrier immediately preceding the LL instruction. Therefore we
 * can skip emitting a barrier from __smp_mb__before_atomic().
 */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
# define __smp_mb__before_atomic()
#else
# define __smp_mb__before_atomic()	__smp_mb__before_llsc()
#endif

#define __smp_mb__after_atomic()	smp_llsc_mb()
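
/*
 * Usage sketch (illustrative): callers combine these with barrier-less
 * atomics to build fully-ordered operations, e.g.:
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&counter);
 *	smp_mb__after_atomic();
 *
 * giving atomic_inc() full-barrier semantics without making every atomic
 * carry that cost.
 */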

/* Completion barrier for preceding globally-invalidate (GINV*) instructions. */
static inline void sync_ginv(void)
{
	asm volatile(__SYNC(ginv, always));
}
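
/*
 * Usage sketch (illustrative): sync_ginv() acts as a completion barrier for
 * the CM 3.5+ global invalidate instructions, e.g. after a GINVT-based TLB
 * flush:
 *
 *	ginvt_full();		// broadcast TLB invalidate (see asm/ginvt.h)
 *	sync_ginv();		// wait for the invalidation to complete
 */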

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */