Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
a3c4946d RB |
6 | * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org> |
7 | * Copyright (C) MIPS Technologies, Inc. | |
8 | * written by Ralf Baechle <ralf@linux-mips.org> | |
1da177e4 LT |
9 | */ |
10 | #ifndef _ASM_HAZARDS_H | |
11 | #define _ASM_HAZARDS_H | |
12 | ||
1da177e4 | 13 | |
/*
 * ASMMACRO(name, code...) defines a hazard-avoidance primitive in a form
 * usable from both assembly and C:
 *
 *  - In assembly files it becomes a real assembler macro, so "name" can be
 *    used like an instruction.
 *  - In C files it emits the same assembler macro once (so .macro "name"
 *    exists in the assembler's scope) and wraps it in a static inline
 *    function of the same name, so C code can call name().
 *
 * The inline function is __volatile__ so the barrier is never optimized
 * away or reordered by the compiler.
 */
#ifdef __ASSEMBLY__

#define ASMMACRO(name, code...) .macro name; code; .endm

#else

#define ASMMACRO(name, code...)						\
__asm__(".macro " #name "; " #code "; .endm");				\
									\
static inline void name(void)						\
{									\
	__asm__ __volatile__ (#name);					\
}

#endif
27 | ||
d7d86aa8 RB |
28 | ASMMACRO(_ssnop, |
29 | sll $0, $0, 1 | |
30 | ) | |
31 | ||
32 | ASMMACRO(_ehb, | |
33 | sll $0, $0, 3 | |
34 | ) | |
35 | ||
1da177e4 | 36 | /* |
d7d86aa8 | 37 | * TLB hazards |
1da177e4 | 38 | */ |
d7d86aa8 | 39 | #if defined(CONFIG_CPU_MIPSR2) |
1da177e4 | 40 | |
1da177e4 | 41 | /* |
d7d86aa8 | 42 | * MIPSR2 defines ehb for hazard avoidance |
1da177e4 LT |
43 | */ |
44 | ||
d7d86aa8 RB |
45 | ASMMACRO(mtc0_tlbw_hazard, |
46 | _ehb | |
47 | ) | |
48 | ASMMACRO(tlbw_use_hazard, | |
49 | _ehb | |
50 | ) | |
51 | ASMMACRO(tlb_probe_hazard, | |
52 | _ehb | |
53 | ) | |
54 | ASMMACRO(irq_enable_hazard, | |
55 | ) | |
56 | ASMMACRO(irq_disable_hazard, | |
1da177e4 | 57 | _ehb |
d7d86aa8 RB |
58 | ) |
59 | ASMMACRO(back_to_back_c0_hazard, | |
60 | _ehb | |
61 | ) | |
1da177e4 | 62 | /* |
d7d86aa8 RB |
63 | * gcc has a tradition of misscompiling the previous construct using the |
64 | * address of a label as argument to inline assembler. Gas otoh has the | |
65 | * annoying difference between la and dla which are only usable for 32-bit | |
66 | * rsp. 64-bit code, so can't be used without conditional compilation. | |
67 | * The alterantive is switching the assembler to 64-bit code which happens | |
68 | * to work right even for 32-bit code ... | |
1da177e4 | 69 | */ |
d7d86aa8 RB |
70 | #define instruction_hazard() \ |
71 | do { \ | |
72 | unsigned long tmp; \ | |
73 | \ | |
74 | __asm__ __volatile__( \ | |
75 | " .set mips64r2 \n" \ | |
76 | " dla %0, 1f \n" \ | |
77 | " jr.hb %0 \n" \ | |
78 | " .set mips0 \n" \ | |
79 | "1: \n" \ | |
80 | : "=r" (tmp)); \ | |
81 | } while (0) | |
1da177e4 | 82 | |
d7d86aa8 | 83 | #elif defined(CONFIG_CPU_R10000) |
1da177e4 LT |
84 | |
85 | /* | |
d7d86aa8 | 86 | * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. |
1da177e4 | 87 | */ |
1da177e4 | 88 | |
d7d86aa8 RB |
89 | ASMMACRO(mtc0_tlbw_hazard, |
90 | ) | |
91 | ASMMACRO(tlbw_use_hazard, | |
92 | ) | |
93 | ASMMACRO(tlb_probe_hazard, | |
94 | ) | |
95 | ASMMACRO(irq_enable_hazard, | |
96 | ) | |
97 | ASMMACRO(irq_disable_hazard, | |
98 | ) | |
99 | ASMMACRO(back_to_back_c0_hazard, | |
100 | ) | |
101 | #define instruction_hazard() do { } while (0) | |
1da177e4 | 102 | |
d7d86aa8 | 103 | #elif defined(CONFIG_CPU_RM9000) |
88d535b6 | 104 | |
1da177e4 LT |
105 | /* |
106 | * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent | |
107 | * use of the JTLB for instructions should not occur for 4 cpu cycles and use | |
108 | * for data translations should not occur for 3 cpu cycles. | |
109 | */ | |
110 | ||
d7d86aa8 RB |
111 | ASMMACRO(mtc0_tlbw_hazard, |
112 | _ssnop; _ssnop; _ssnop; _ssnop | |
113 | ) | |
114 | ASMMACRO(tlbw_use_hazard, | |
115 | _ssnop; _ssnop; _ssnop; _ssnop | |
116 | ) | |
117 | ASMMACRO(tlb_probe_hazard, | |
118 | _ssnop; _ssnop; _ssnop; _ssnop | |
119 | ) | |
120 | ASMMACRO(irq_enable_hazard, | |
121 | ) | |
122 | ASMMACRO(irq_disable_hazard, | |
123 | ) | |
124 | ASMMACRO(back_to_back_c0_hazard, | |
125 | ) | |
126 | #define instruction_hazard() do { } while (0) | |
1da177e4 | 127 | |
d7d86aa8 | 128 | #elif defined(CONFIG_CPU_SB1) |
1da177e4 LT |
129 | |
130 | /* | |
d7d86aa8 | 131 | * Mostly like R4000 for historic reasons |
1da177e4 | 132 | */ |
d7d86aa8 RB |
133 | ASMMACRO(mtc0_tlbw_hazard, |
134 | ) | |
135 | ASMMACRO(tlbw_use_hazard, | |
136 | ) | |
137 | ASMMACRO(tlb_probe_hazard, | |
138 | ) | |
139 | ASMMACRO(irq_enable_hazard, | |
140 | ) | |
141 | ASMMACRO(irq_disable_hazard, | |
142 | _ssnop; _ssnop; _ssnop | |
143 | ) | |
144 | ASMMACRO(back_to_back_c0_hazard, | |
145 | ) | |
146 | #define instruction_hazard() do { } while (0) | |
5068debf | 147 | |
1da177e4 LT |
148 | #else |
149 | ||
150 | /* | |
d7d86aa8 RB |
151 | * Finally the catchall case for all other processors including R4000, R4400, |
152 | * R4600, R4700, R5000, RM7000, NEC VR41xx etc. | |
a3c4946d | 153 | * |
d7d86aa8 RB |
154 | * The taken branch will result in a two cycle penalty for the two killed |
155 | * instructions on R4000 / R4400. Other processors only have a single cycle | |
156 | * hazard so this is nice trick to have an optimal code for a range of | |
157 | * processors. | |
7043ad4f | 158 | */ |
d7d86aa8 RB |
159 | ASMMACRO(mtc0_tlbw_hazard, |
160 | nop | |
161 | ) | |
162 | ASMMACRO(tlbw_use_hazard, | |
163 | nop; nop; nop | |
164 | ) | |
165 | ASMMACRO(tlb_probe_hazard, | |
166 | nop; nop; nop | |
167 | ) | |
168 | ASMMACRO(irq_enable_hazard, | |
169 | ) | |
170 | ASMMACRO(irq_disable_hazard, | |
171 | nop; nop; nop | |
172 | ) | |
173 | ASMMACRO(back_to_back_c0_hazard, | |
174 | _ssnop; _ssnop; _ssnop; | |
175 | ) | |
cc61c1fe | 176 | #define instruction_hazard() do { } while (0) |
41c594ab | 177 | |
d7d86aa8 | 178 | #endif |
1da177e4 LT |
179 | |
180 | #endif /* _ASM_HAZARDS_H */ |