/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/regdef.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#define CALLFRAME_SIZ   32

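/*
 * Scratch CP0 registers as { register, select } pairs: one holds the VCPU
 * pointer, the other is a temporary. The defaults below are replaced with
 * KScratch registers by kvm_mips_entry_setup() when the core provides them.
 */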
static unsigned int scratch_vcpu[2] = { C0_DDATALO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

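/* uasm labels for branch targets, resolved later by uasm_resolve_relocs() */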
enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
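	/* KScratch registers live in CP0 register 31, selects 2..7 */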
	return 31;
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

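	/* Don't reuse the KScratch register the kernel already uses for the PGD */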
	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 *	int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * GPR_A0: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
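	/* Save callee-saved registers: s0-s7 (16-23) and gp/sp/s8/ra (28-31) */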
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
	UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
	build_set_exc_base(&p, GPR_K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
	uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
	UASM_i_MTC0(&p, GPR_T0, C0_EPC);

	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
			      (int)offsetof(struct kvm_vcpu, arch), GPR_K1);
	UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, GPR_RA, GPR_T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
	uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI);
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  GPR_K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
	/* index the ASID array */
	uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
	UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
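	/*
	 * Mask the loaded value down to the EntryHi ASID field. With
	 * CONFIG_MIPS_ASID_BITS_VARIABLE the mask is read from this CPU's
	 * cpu_data[].asid_mask rather than being a compile-time constant.
	 */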
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);

	UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
	UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
#else
	uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
#endif

	/* Set up KVM VZ root ASID (!guestid) */
	uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI);
skip_asid_restore:
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == GPR_K0 || i == GPR_K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
	uasm_i_mthi(&p, GPR_K0);

	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
	uasm_i_mtlo(&p, GPR_K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
#endif

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

#ifdef CONFIG_CPU_LOONGSON64
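	/*
	 * Loongson-64 cores provide the LDDIR/LDPTE page table walk
	 * instructions, so the refill can be assembled without the generic
	 * tlbex helpers.
	 */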
	UASM_i_MFC0(&p, GPR_K1, C0_PGD);
	uasm_i_lddir(&p, GPR_K0, GPR_K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, GPR_K1, GPR_K0, 1);  /* middle page dir */
#endif
	uasm_i_ldpte(&p, GPR_K1, 0);      /* even */
	uasm_i_ldpte(&p, GPR_K1, 1);      /* odd */
	uasm_i_tlbwr(&p);
#else
	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
	build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, GPR_K0, GPR_K1);
	build_update_entries(&p, GPR_K0, GPR_K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == GPR_K0 || i == GPR_K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, GPR_T0);
	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);

	uasm_i_mflo(&p, GPR_T0);
	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, GPR_K0, C0_EPC);
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);

	UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR);
	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  GPR_K1);

	uasm_i_mfc0(&p, GPR_K0, C0_CAUSE);
	uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1);

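	/* Save the faulting instruction word(s) if the core captures them */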
	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR);
		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
					       host_cp0_badinstr), GPR_K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP);
		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
					       host_cp0_badinstrp), GPR_K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);

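	/* Set BEV so a stray exception uses the boot vectors while EBASE changes */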
	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
	uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT);

	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase);
	UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0);
	build_set_exc_base(&p, GPR_K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16);
		uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT);
		uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, GPR_T0, 31);
		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  GPR_K1);
		uasm_i_ctc1(&p, GPR_ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5);
		uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, GPR_T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR);
		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  GPR_K1);
		uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  GPR_K1);
		UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, GPR_A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, GPR_RA, GPR_T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
	uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, GPR_K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
	uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
	uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GPR_GP */
	UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, GPR_A0, GPR_S0);
	UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, GPR_RA, GPR_T9);
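	/* delay slot: carve out the minimal call frame for the C call */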
	UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler, make sure interrupts are disabled */
	uasm_i_di(&p, GPR_ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, GPR_K1, GPR_S0);
	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s0) back into the scratch register */
	UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
	uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, GPR_T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
	uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
	uasm_i_move(&p, GPR_V0, GPR_K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);

	/* Restore GPR_RA, which is the address we will return to */
	UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
	uasm_i_jr(&p, GPR_RA);
	uasm_i_nop(&p);

	return p;
}