Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
a3c4946d RB |
6 | * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org> |
7 | * Copyright (C) MIPS Technologies, Inc. | |
8 | * written by Ralf Baechle <ralf@linux-mips.org> | |
1da177e4 LT |
9 | */ |
10 | #ifndef _ASM_HAZARDS_H | |
11 | #define _ASM_HAZARDS_H | |
12 | ||
1da177e4 | 13 | |
36396f3c | 14 | #ifdef __ASSEMBLY__ |
d7d86aa8 | 15 | #define ASMMACRO(name, code...) .macro name; code; .endm |
1da177e4 LT |
16 | #else |
17 | ||
d7d86aa8 RB |
18 | #define ASMMACRO(name, code...) \ |
19 | __asm__(".macro " #name "; " #code "; .endm"); \ | |
20 | \ | |
21 | static inline void name(void) \ | |
22 | { \ | |
23 | __asm__ __volatile__ (#name); \ | |
24 | } | |
1da177e4 | 25 | |
1da177e4 LT |
26 | #endif |
27 | ||
d7d86aa8 RB |
28 | ASMMACRO(_ssnop, |
29 | sll $0, $0, 1 | |
30 | ) | |
31 | ||
32 | ASMMACRO(_ehb, | |
33 | sll $0, $0, 3 | |
34 | ) | |
35 | ||
1da177e4 | 36 | /* |
d7d86aa8 | 37 | * TLB hazards |
1da177e4 | 38 | */ |
d7d86aa8 | 39 | #if defined(CONFIG_CPU_MIPSR2) |
1da177e4 | 40 | |
1da177e4 | 41 | /* |
d7d86aa8 | 42 | * MIPSR2 defines ehb for hazard avoidance |
1da177e4 LT |
43 | */ |
44 | ||
d7d86aa8 RB |
45 | ASMMACRO(mtc0_tlbw_hazard, |
46 | _ehb | |
47 | ) | |
48 | ASMMACRO(tlbw_use_hazard, | |
49 | _ehb | |
50 | ) | |
51 | ASMMACRO(tlb_probe_hazard, | |
52 | _ehb | |
53 | ) | |
54 | ASMMACRO(irq_enable_hazard, | |
7605b390 | 55 | _ehb |
d7d86aa8 RB |
56 | ) |
57 | ASMMACRO(irq_disable_hazard, | |
1da177e4 | 58 | _ehb |
d7d86aa8 RB |
59 | ) |
60 | ASMMACRO(back_to_back_c0_hazard, | |
61 | _ehb | |
62 | ) | |
1da177e4 | 63 | /* |
d7d86aa8 RB |
64 | * gcc has a tradition of miscompiling the previous construct using the |
65 | * address of a label as argument to inline assembler. Gas otoh has the | |
66 | * annoying difference between la and dla which are only usable for 32-bit | |
67 | * resp. 64-bit code, so can't be used without conditional compilation. | |
68 | * The alternative is switching the assembler to 64-bit code which happens | |
69 | * to work right even for 32-bit code ... | |
1da177e4 | 70 | */ |
d7d86aa8 RB |
71 | #define instruction_hazard() \ |
72 | do { \ | |
73 | unsigned long tmp; \ | |
74 | \ | |
75 | __asm__ __volatile__( \ | |
76 | " .set mips64r2 \n" \ | |
77 | " dla %0, 1f \n" \ | |
78 | " jr.hb %0 \n" \ | |
79 | " .set mips0 \n" \ | |
80 | "1: \n" \ | |
81 | : "=r" (tmp)); \ | |
82 | } while (0) | |
1da177e4 | 83 | |
d7d86aa8 | 84 | #elif defined(CONFIG_CPU_R10000) |
1da177e4 LT |
85 | |
86 | /* | |
d7d86aa8 | 87 | * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer. |
1da177e4 | 88 | */ |
1da177e4 | 89 | |
d7d86aa8 RB |
90 | ASMMACRO(mtc0_tlbw_hazard, |
91 | ) | |
92 | ASMMACRO(tlbw_use_hazard, | |
93 | ) | |
94 | ASMMACRO(tlb_probe_hazard, | |
95 | ) | |
96 | ASMMACRO(irq_enable_hazard, | |
97 | ) | |
98 | ASMMACRO(irq_disable_hazard, | |
99 | ) | |
100 | ASMMACRO(back_to_back_c0_hazard, | |
101 | ) | |
102 | #define instruction_hazard() do { } while (0) | |
1da177e4 | 103 | |
d7d86aa8 | 104 | #elif defined(CONFIG_CPU_RM9000) |
88d535b6 | 105 | |
1da177e4 LT |
106 | /* |
107 | * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent | |
108 | * use of the JTLB for instructions should not occur for 4 cpu cycles and use | |
109 | * for data translations should not occur for 3 cpu cycles. | |
110 | */ | |
111 | ||
d7d86aa8 RB |
112 | ASMMACRO(mtc0_tlbw_hazard, |
113 | _ssnop; _ssnop; _ssnop; _ssnop | |
114 | ) | |
115 | ASMMACRO(tlbw_use_hazard, | |
116 | _ssnop; _ssnop; _ssnop; _ssnop | |
117 | ) | |
118 | ASMMACRO(tlb_probe_hazard, | |
119 | _ssnop; _ssnop; _ssnop; _ssnop | |
120 | ) | |
121 | ASMMACRO(irq_enable_hazard, | |
122 | ) | |
123 | ASMMACRO(irq_disable_hazard, | |
124 | ) | |
125 | ASMMACRO(back_to_back_c0_hazard, | |
126 | ) | |
127 | #define instruction_hazard() do { } while (0) | |
1da177e4 | 128 | |
d7d86aa8 | 129 | #elif defined(CONFIG_CPU_SB1) |
1da177e4 LT |
130 | |
131 | /* | |
d7d86aa8 | 132 | * Mostly like R4000 for historic reasons |
1da177e4 | 133 | */ |
d7d86aa8 RB |
134 | ASMMACRO(mtc0_tlbw_hazard, |
135 | ) | |
136 | ASMMACRO(tlbw_use_hazard, | |
137 | ) | |
138 | ASMMACRO(tlb_probe_hazard, | |
139 | ) | |
140 | ASMMACRO(irq_enable_hazard, | |
141 | ) | |
142 | ASMMACRO(irq_disable_hazard, | |
143 | _ssnop; _ssnop; _ssnop | |
144 | ) | |
145 | ASMMACRO(back_to_back_c0_hazard, | |
146 | ) | |
147 | #define instruction_hazard() do { } while (0) | |
5068debf | 148 | |
1da177e4 LT |
149 | #else |
150 | ||
151 | /* | |
d7d86aa8 RB |
152 | * Finally the catchall case for all other processors including R4000, R4400, |
153 | * R4600, R4700, R5000, RM7000, NEC VR41xx etc. | |
a3c4946d | 154 | * |
d7d86aa8 RB |
155 | * The taken branch will result in a two cycle penalty for the two killed |
156 | * instructions on R4000 / R4400. Other processors only have a single cycle | |
157 | hazard so this is a nice trick to have optimal code for a range of | |
158 | * processors. | |
7043ad4f | 159 | */ |
d7d86aa8 | 160 | ASMMACRO(mtc0_tlbw_hazard, |
3f318370 | 161 | nop; nop |
d7d86aa8 RB |
162 | ) |
163 | ASMMACRO(tlbw_use_hazard, | |
164 | nop; nop; nop | |
165 | ) | |
166 | ASMMACRO(tlb_probe_hazard, | |
167 | nop; nop; nop | |
168 | ) | |
169 | ASMMACRO(irq_enable_hazard, | |
170 | ) | |
171 | ASMMACRO(irq_disable_hazard, | |
172 | nop; nop; nop | |
173 | ) | |
174 | ASMMACRO(back_to_back_c0_hazard, | |
175 | _ssnop; _ssnop; _ssnop; | |
176 | ) | |
cc61c1fe | 177 | #define instruction_hazard() do { } while (0) |
41c594ab | 178 | |
d7d86aa8 | 179 | #endif |
1da177e4 LT |
180 | |
181 | #endif /* _ASM_HAZARDS_H */ |