/*
 * Instruction-patching support.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/unistd.h>

/*
 * This was adapted from code written by Tony Luck:
 *
 * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
 * like this:
 *
 * 6  6         5         4         3         2         1
 * 3210987654321098765432109876543210987654321098765432109876543210
 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
 *
 * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
 */
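/*
 * In terms of the value's own bits: A is bit 63, B bits 62..40, C bits
 * 39..22, D bit 21, E bits 20..16, F bits 15..7 and G bits 6..0 (this
 * follows directly from the masks and shifts in get_imm64() below).
 */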
static u64
get_imm64 (u64 insn_addr)
{
        u64 *p = (u64 *) (insn_addr & -16);     /* mask out slot number */

        return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
                ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
                ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
                ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
                ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
                ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
                ((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
}

/* Patch instruction with "val" where "mask" has 1 bits. */
void
ia64_patch (u64 insn_addr, u64 mask, u64 val)
{
        u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
#       define insn_mask ((1UL << 41) - 1)
        unsigned long shift;

        b0 = b[0]; b1 = b[1];
        shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
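        /* slot 0 therefore starts at bit 5 of b[0], slot 1 at bit 46
           (straddling into b[1]) and slot 2 at bit 23 of b[1] */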
        if (shift >= 64) {
                m1 = mask << (shift - 64);
                v1 = val << (shift - 64);
        } else {
                m0 = mask << shift; m1 = mask >> (64 - shift);
                v0 = val << shift;  v1 = val >> (64 - shift);
                b[0] = (b0 & ~m0) | (v0 & m0);
        }
        b[1] = (b1 & ~m1) | (v1 & m1);
}

void
ia64_patch_imm64 (u64 insn_addr, u64 val)
{
        /* The assembler may generate offset pointing to either slot 1
           or slot 2 for a long (2-slot) instruction, occupying slots 1
           and 2.  */
        insn_addr &= -16UL;
        ia64_patch(insn_addr + 2,
                   0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                                     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                                     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                                     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
                                     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
        ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}
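
/*
 * A minimal round-trip sketch (kept under "#if 0", not part of the build):
 * it shows how ia64_patch_imm64() scatters a value that get_imm64() then
 * gathers back.  The function name and the zeroed scratch bundle are
 * illustrative assumptions only; in real use the target is an existing
 * "movl" bundle in the kernel text.
 */
#if 0
static int __init
patch_imm64_roundtrip (void)
{
        /* 16-byte aligned, as required by ia64_patch()/get_imm64() */
        static u64 bundle[2] __attribute__((aligned(16)));
        u64 val = 0xdeadbeefcafef00dUL;

        ia64_patch_imm64((u64) bundle, val);
        return get_imm64((u64) bundle) == val;  /* evaluates to 1 */
}
#endif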

void
ia64_patch_imm60 (u64 insn_addr, u64 val)
{
        /* The assembler may generate offset pointing to either slot 1
           or slot 2 for a long (2-slot) instruction, occupying slots 1
           and 2.  */
        insn_addr &= -16UL;
        ia64_patch(insn_addr + 2,
                   0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                                     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
        ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
}
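
/*
 * The 60-bit immediate is the long-branch ("brl") displacement, counted in
 * 16-byte bundles relative to the bundle containing the branch; see how
 * patch_brl_fsys_bubble_down() below computes it.
 */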

/*
 * Sometimes we need to load the physical address of a kernel
 * object.  Often we can convert the virtual address to physical
 * at execution time, but sometimes (either for performance reasons
 * or during error recovery) we cannot do this.  Patch the marked
 * bundles to load the physical address.
 */
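/*
 * Each entry in a patch list is a self-relative 32-bit offset to the bundle
 * that needs patching (hence "ip = (u64) offp + *offp" below); every patch
 * routine in this file walks its list the same way.
 */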
void __init
ia64_patch_vtop (unsigned long start, unsigned long end)
{
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;

                /* replace virtual address with corresponding physical address: */
                ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

/*
 * Disable the RSE workaround by turning the conditional branch
 * that we tagged in each place the workaround was used into an
 * unconditional branch.
 */
void __init
ia64_patch_rse (unsigned long start, unsigned long end)
{
        s32 *offp = (s32 *) start;
        u64 ip, *b;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;

                b = (u64 *)(ip & -16);
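                /* slot 2 starts at bit 23 of the second bundle word, so this
                   clears the low predicate bits of the tagged branch,
                   presumably leaving it predicated on p0 (always taken) */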
                b[1] &= ~0xf800000L;
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
        static int first_time = 1;
        int need_workaround;
        s32 *offp = (s32 *) start;
        u64 *wp;

        need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);

        if (first_time) {
                first_time = 0;
                if (need_workaround)
                        printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
        }
        if (need_workaround)
                return;

        while (offp < (s32 *) end) {
                wp = (u64 *) ia64_imva((char *) offp + *offp);
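                /* on CPUs that do not need the erratum workaround, overwrite
                   the two tagged bundles with a plain "br.ret b6" bundle
                   followed by an all-nop bundle (the constants below) */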
                wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
                wp[1] = 0x0084006880000200UL;
                wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
                wp[3] = 0x0004000000000200UL;
                ia64_fc(wp); ia64_fc(wp + 2);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

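/*
 * Paravirtualized fsyscall support: pv_fsys_data appears intended to let a
 * paravirtualized environment supply its own fsyscall table and
 * fsys_bubble_down entry point; the native symbols below are the defaults
 * used on bare hardware.
 */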
extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
        .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
        .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};

unsigned long * __init
paravirt_get_fsyscall_table(void)
{
        return pv_fsys_data.fsyscall_table;
}

char * __init
paravirt_get_fsys_bubble_down(void)
{
        return pv_fsys_data.fsys_bubble_down;
}

static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
        u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) ia64_imva((char *) offp + *offp);
                ia64_patch_imm64(ip, fsyscall_table);
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
        u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
        s32 *offp = (s32 *) start;
        u64 ip;

        while (offp < (s32 *) end) {
                ip = (u64) offp + *offp;
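                /* convert the target's absolute address into a brl
                   displacement: 16-byte bundles from the bundle that
                   contains the tagged branch */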
                ia64_patch_imm60((u64) ia64_imva((void *) ip),
                                 (u64) (fsys_bubble_down - (ip & -16)) / 16);
                ia64_fc((void *) ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}

void __init
ia64_patch_gate (void)
{
#       define START(name)      paravirt_get_gate_patchlist(PV_GATE_START_##name)
#       define END(name)        paravirt_get_gate_patchlist(PV_GATE_END_##name)

        patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
        patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
        ia64_patch_vtop(START(VTOP), END(VTOP));
        ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
}

void ia64_patch_phys_stack_reg(unsigned long val)
{
        s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
        s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
        u64 ip, mask, imm;

        /* see instruction format A4: adds r1 = imm13, r3 */
        mask = (0x3fUL << 27) | (0x7f << 13);
        imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;
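        /* i.e. val bits 0..6 go into the A4 imm7b field (instruction bits
           13..19) and val bits 7..12 into imm6d (bits 27..32); the sign bit
           is untouched, so val is assumed to fit in 13 bits */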

        while (offp < end) {
                ip = (u64) offp + *offp;
                ia64_patch(ip, mask, imm);
                ia64_fc(ip);
                ++offp;
        }
        ia64_sync_i();
        ia64_srlz_i();
}