Commit | Line | Data |
---|---|---|
f5c236dd | 1 | /* |
d116e812 DCZ |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | |
7 | * | |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | |
10 | */ | |
f5c236dd SL |
11 | |
12 | #include <linux/errno.h> | |
13 | #include <linux/err.h> | |
f5c236dd SL |
14 | #include <linux/vmalloc.h> |
15 | ||
16 | #include <linux/kvm_host.h> | |
17 | ||
d7d5b05f | 18 | #include "interrupt.h" |
f5c236dd SL |
19 | |
20 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |
21 | { | |
22 | gpa_t gpa; | |
8cffd197 | 23 | gva_t kseg = KSEGX(gva); |
f5c236dd SL |
24 | |
25 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | |
26 | gpa = CPHYSADDR(gva); | |
27 | else { | |
6ad78a5c | 28 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
f5c236dd SL |
29 | kvm_mips_dump_host_tlbs(); |
30 | gpa = KVM_INVALID_ADDR; | |
31 | } | |
32 | ||
f5c236dd | 33 | kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); |
f5c236dd SL |
34 | |
35 | return gpa; | |
36 | } | |
37 | ||
f5c236dd SL |
38 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
39 | { | |
1c0cd66a | 40 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
f5c236dd | 41 | struct kvm_run *run = vcpu->run; |
8cffd197 | 42 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
31cf7498 | 43 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
44 | enum emulation_result er = EMULATE_DONE; |
45 | int ret = RESUME_GUEST; | |
46 | ||
1c0cd66a JH |
47 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { |
48 | /* FPU Unusable */ | |
49 | if (!kvm_mips_guest_has_fpu(&vcpu->arch) || | |
50 | (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { | |
51 | /* | |
52 | * Unusable/no FPU in guest: | |
53 | * deliver guest COP1 Unusable Exception | |
54 | */ | |
55 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | |
56 | } else { | |
57 | /* Restore FPU state */ | |
58 | kvm_own_fpu(vcpu); | |
59 | er = EMULATE_DONE; | |
60 | } | |
61 | } else { | |
f5c236dd | 62 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
1c0cd66a | 63 | } |
f5c236dd SL |
64 | |
65 | switch (er) { | |
66 | case EMULATE_DONE: | |
67 | ret = RESUME_GUEST; | |
68 | break; | |
69 | ||
70 | case EMULATE_FAIL: | |
71 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
72 | ret = RESUME_HOST; | |
73 | break; | |
74 | ||
75 | case EMULATE_WAIT: | |
76 | run->exit_reason = KVM_EXIT_INTR; | |
77 | ret = RESUME_HOST; | |
78 | break; | |
79 | ||
80 | default: | |
81 | BUG(); | |
82 | } | |
83 | return ret; | |
84 | } | |
85 | ||
86 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |
87 | { | |
88 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 89 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
f5c236dd | 90 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
31cf7498 | 91 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
92 | enum emulation_result er = EMULATE_DONE; |
93 | int ret = RESUME_GUEST; | |
94 | ||
95 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | |
96 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | |
31cf7498 | 97 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", |
d116e812 | 98 | cause, opc, badvaddr); |
f5c236dd SL |
99 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); |
100 | ||
101 | if (er == EMULATE_DONE) | |
102 | ret = RESUME_GUEST; | |
103 | else { | |
104 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
105 | ret = RESUME_HOST; | |
106 | } | |
107 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | |
d116e812 DCZ |
108 | /* |
109 | * XXXKYMA: The guest kernel does not expect to get this fault | |
110 | * when we are not using HIGHMEM. Need to address this in a | |
111 | * HIGHMEM kernel | |
f5c236dd | 112 | */ |
31cf7498 | 113 | kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n", |
6ad78a5c | 114 | cause, opc, badvaddr); |
f5c236dd SL |
115 | kvm_mips_dump_host_tlbs(); |
116 | kvm_arch_vcpu_dump_regs(vcpu); | |
117 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
118 | ret = RESUME_HOST; | |
119 | } else { | |
31cf7498 | 120 | kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", |
6ad78a5c | 121 | cause, opc, badvaddr); |
f5c236dd SL |
122 | kvm_mips_dump_host_tlbs(); |
123 | kvm_arch_vcpu_dump_regs(vcpu); | |
124 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
125 | ret = RESUME_HOST; | |
126 | } | |
127 | return ret; | |
128 | } | |
129 | ||
3b08aec5 | 130 | static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) |
f5c236dd SL |
131 | { |
132 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 133 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
f5c236dd | 134 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
31cf7498 | 135 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
136 | enum emulation_result er = EMULATE_DONE; |
137 | int ret = RESUME_GUEST; | |
138 | ||
139 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | |
140 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | |
141 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | |
142 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
143 | ret = RESUME_HOST; | |
144 | } | |
145 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | |
146 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | |
3b08aec5 JH |
147 | kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n", |
148 | store ? "ST" : "LD", cause, opc, badvaddr); | |
f5c236dd | 149 | |
d116e812 DCZ |
150 | /* |
151 | * User Address (UA) fault, this could happen if | |
152 | * (1) TLB entry not present/valid in both Guest and shadow host | |
153 | * TLBs, in this case we pass on the fault to the guest | |
154 | * kernel and let it handle it. | |
155 | * (2) TLB entry is present in the Guest TLB but not in the | |
156 | * shadow, in this case we inject the TLB from the Guest TLB | |
157 | * into the shadow host TLB | |
f5c236dd SL |
158 | */ |
159 | ||
160 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | |
161 | if (er == EMULATE_DONE) | |
162 | ret = RESUME_GUEST; | |
163 | else { | |
164 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
165 | ret = RESUME_HOST; | |
166 | } | |
167 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | |
3b08aec5 JH |
168 | /* |
169 | * All KSEG0 faults are handled by KVM, as the guest kernel does | |
170 | * not expect to ever get them | |
171 | */ | |
f5c236dd SL |
172 | if (kvm_mips_handle_kseg0_tlb_fault |
173 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | |
174 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
175 | ret = RESUME_HOST; | |
176 | } | |
d5888477 JH |
177 | } else if (KVM_GUEST_KERNEL_MODE(vcpu) |
178 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | |
179 | /* | |
180 | * With EVA we may get a TLB exception instead of an address | |
181 | * error when the guest performs MMIO to KSeg1 addresses. | |
182 | */ | |
183 | kvm_debug("Emulate %s MMIO space\n", | |
184 | store ? "Store to" : "Load from"); | |
185 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | |
186 | if (er == EMULATE_FAIL) { | |
187 | kvm_err("Emulate %s MMIO space failed\n", | |
188 | store ? "Store to" : "Load from"); | |
189 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
190 | ret = RESUME_HOST; | |
191 | } else { | |
192 | run->exit_reason = KVM_EXIT_MMIO; | |
193 | ret = RESUME_HOST; | |
194 | } | |
f5c236dd | 195 | } else { |
3b08aec5 JH |
196 | kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", |
197 | store ? "ST" : "LD", cause, opc, badvaddr); | |
f5c236dd SL |
198 | kvm_mips_dump_host_tlbs(); |
199 | kvm_arch_vcpu_dump_regs(vcpu); | |
200 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
201 | ret = RESUME_HOST; | |
202 | } | |
203 | return ret; | |
204 | } | |
205 | ||
3b08aec5 JH |
206 | static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) |
207 | { | |
208 | return kvm_trap_emul_handle_tlb_miss(vcpu, true); | |
209 | } | |
210 | ||
211 | static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |
212 | { | |
213 | return kvm_trap_emul_handle_tlb_miss(vcpu, false); | |
214 | } | |
215 | ||
f5c236dd SL |
216 | static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) |
217 | { | |
218 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 219 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
f5c236dd | 220 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
31cf7498 | 221 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
222 | enum emulation_result er = EMULATE_DONE; |
223 | int ret = RESUME_GUEST; | |
224 | ||
225 | if (KVM_GUEST_KERNEL_MODE(vcpu) | |
226 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | |
f5c236dd | 227 | kvm_debug("Emulate Store to MMIO space\n"); |
f5c236dd SL |
228 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
229 | if (er == EMULATE_FAIL) { | |
6ad78a5c | 230 | kvm_err("Emulate Store to MMIO space failed\n"); |
f5c236dd SL |
231 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
232 | ret = RESUME_HOST; | |
233 | } else { | |
234 | run->exit_reason = KVM_EXIT_MMIO; | |
235 | ret = RESUME_HOST; | |
236 | } | |
237 | } else { | |
31cf7498 | 238 | kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", |
6ad78a5c | 239 | cause, opc, badvaddr); |
f5c236dd SL |
240 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
241 | ret = RESUME_HOST; | |
242 | } | |
243 | return ret; | |
244 | } | |
245 | ||
246 | static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |
247 | { | |
248 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 249 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
f5c236dd | 250 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
31cf7498 | 251 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
252 | enum emulation_result er = EMULATE_DONE; |
253 | int ret = RESUME_GUEST; | |
254 | ||
255 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { | |
f5c236dd | 256 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); |
f5c236dd SL |
257 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
258 | if (er == EMULATE_FAIL) { | |
6ad78a5c | 259 | kvm_err("Emulate Load from MMIO space failed\n"); |
f5c236dd SL |
260 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
261 | ret = RESUME_HOST; | |
262 | } else { | |
263 | run->exit_reason = KVM_EXIT_MMIO; | |
264 | ret = RESUME_HOST; | |
265 | } | |
266 | } else { | |
31cf7498 | 267 | kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", |
6ad78a5c | 268 | cause, opc, badvaddr); |
f5c236dd SL |
269 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
270 | ret = RESUME_HOST; | |
271 | er = EMULATE_FAIL; | |
272 | } | |
273 | return ret; | |
274 | } | |
275 | ||
276 | static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) | |
277 | { | |
278 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 279 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
31cf7498 | 280 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
281 | enum emulation_result er = EMULATE_DONE; |
282 | int ret = RESUME_GUEST; | |
283 | ||
284 | er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); | |
285 | if (er == EMULATE_DONE) | |
286 | ret = RESUME_GUEST; | |
287 | else { | |
288 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
289 | ret = RESUME_HOST; | |
290 | } | |
291 | return ret; | |
292 | } | |
293 | ||
294 | static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) | |
295 | { | |
296 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 297 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
31cf7498 | 298 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
299 | enum emulation_result er = EMULATE_DONE; |
300 | int ret = RESUME_GUEST; | |
301 | ||
302 | er = kvm_mips_handle_ri(cause, opc, run, vcpu); | |
303 | if (er == EMULATE_DONE) | |
304 | ret = RESUME_GUEST; | |
305 | else { | |
306 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
307 | ret = RESUME_HOST; | |
308 | } | |
309 | return ret; | |
310 | } | |
311 | ||
312 | static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | |
313 | { | |
314 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 315 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
31cf7498 | 316 | u32 cause = vcpu->arch.host_cp0_cause; |
f5c236dd SL |
317 | enum emulation_result er = EMULATE_DONE; |
318 | int ret = RESUME_GUEST; | |
319 | ||
320 | er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); | |
321 | if (er == EMULATE_DONE) | |
322 | ret = RESUME_GUEST; | |
323 | else { | |
324 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
325 | ret = RESUME_HOST; | |
326 | } | |
327 | return ret; | |
328 | } | |
329 | ||
0a560427 JH |
330 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) |
331 | { | |
332 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 333 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; |
31cf7498 | 334 | u32 cause = vcpu->arch.host_cp0_cause; |
0a560427 JH |
335 | enum emulation_result er = EMULATE_DONE; |
336 | int ret = RESUME_GUEST; | |
337 | ||
338 | er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); | |
339 | if (er == EMULATE_DONE) { | |
340 | ret = RESUME_GUEST; | |
341 | } else { | |
342 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
343 | ret = RESUME_HOST; | |
344 | } | |
345 | return ret; | |
346 | } | |
347 | ||
c2537ed9 JH |
348 | static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) |
349 | { | |
350 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 351 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; |
31cf7498 | 352 | u32 cause = vcpu->arch.host_cp0_cause; |
c2537ed9 JH |
353 | enum emulation_result er = EMULATE_DONE; |
354 | int ret = RESUME_GUEST; | |
355 | ||
356 | er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); | |
357 | if (er == EMULATE_DONE) { | |
358 | ret = RESUME_GUEST; | |
359 | } else { | |
360 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
361 | ret = RESUME_HOST; | |
362 | } | |
363 | return ret; | |
364 | } | |
365 | ||
1c0cd66a JH |
366 | static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) |
367 | { | |
368 | struct kvm_run *run = vcpu->run; | |
8cffd197 | 369 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; |
31cf7498 | 370 | u32 cause = vcpu->arch.host_cp0_cause; |
1c0cd66a JH |
371 | enum emulation_result er = EMULATE_DONE; |
372 | int ret = RESUME_GUEST; | |
373 | ||
374 | er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); | |
375 | if (er == EMULATE_DONE) { | |
376 | ret = RESUME_GUEST; | |
377 | } else { | |
378 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
379 | ret = RESUME_HOST; | |
380 | } | |
381 | return ret; | |
382 | } | |
383 | ||
c2537ed9 JH |
384 | /** |
385 | * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. | |
386 | * @vcpu: Virtual CPU context. | |
387 | * | |
388 | * Handle when the guest attempts to use MSA when it is disabled. | |
389 | */ | |
98119ad5 JH |
390 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) |
391 | { | |
c2537ed9 | 392 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
98119ad5 | 393 | struct kvm_run *run = vcpu->run; |
8cffd197 | 394 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; |
31cf7498 | 395 | u32 cause = vcpu->arch.host_cp0_cause; |
98119ad5 JH |
396 | enum emulation_result er = EMULATE_DONE; |
397 | int ret = RESUME_GUEST; | |
398 | ||
c2537ed9 JH |
399 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || |
400 | (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { | |
401 | /* | |
402 | * No MSA in guest, or FPU enabled and not in FR=1 mode, | |
403 | * guest reserved instruction exception | |
404 | */ | |
405 | er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | |
406 | } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { | |
407 | /* MSA disabled by guest, guest MSA disabled exception */ | |
408 | er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); | |
409 | } else { | |
410 | /* Restore MSA/FPU state */ | |
411 | kvm_own_msa(vcpu); | |
412 | er = EMULATE_DONE; | |
413 | } | |
98119ad5 JH |
414 | |
415 | switch (er) { | |
416 | case EMULATE_DONE: | |
417 | ret = RESUME_GUEST; | |
418 | break; | |
419 | ||
420 | case EMULATE_FAIL: | |
421 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
422 | ret = RESUME_HOST; | |
423 | break; | |
424 | ||
425 | default: | |
426 | BUG(); | |
427 | } | |
428 | return ret; | |
429 | } | |
430 | ||
f5c236dd SL |
/* Per-VM init: nothing to do for trap & emulate. */
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}
435 | ||
436 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) | |
437 | { | |
05108709 JH |
438 | vcpu->arch.kscratch_enabled = 0xfc; |
439 | ||
f5c236dd SL |
440 | return 0; |
441 | } | |
442 | ||
443 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |
444 | { | |
445 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
e342925f | 446 | u32 config, config1; |
f5c236dd SL |
447 | int vcpu_id = vcpu->vcpu_id; |
448 | ||
d116e812 DCZ |
449 | /* |
450 | * Arch specific stuff, set up config registers properly so that the | |
84260972 | 451 | * guest will come up as expected |
f5c236dd | 452 | */ |
84260972 JH |
453 | #ifndef CONFIG_CPU_MIPSR6 |
454 | /* r2-r5, simulate a MIPS 24kc */ | |
f5c236dd | 455 | kvm_write_c0_guest_prid(cop0, 0x00019300); |
84260972 JH |
456 | #else |
457 | /* r6+, simulate a generic QEMU machine */ | |
458 | kvm_write_c0_guest_prid(cop0, 0x00010000); | |
459 | #endif | |
e342925f JH |
460 | /* |
461 | * Have config1, Cacheable, noncoherent, write-back, write allocate. | |
462 | * Endianness, arch revision & virtually tagged icache should match | |
463 | * host. | |
464 | */ | |
465 | config = read_c0_config() & MIPS_CONF_AR; | |
4e10b764 | 466 | config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB; |
e342925f JH |
467 | #ifdef CONFIG_CPU_BIG_ENDIAN |
468 | config |= CONF_BE; | |
469 | #endif | |
470 | if (cpu_has_vtag_icache) | |
471 | config |= MIPS_CONF_VI; | |
472 | kvm_write_c0_guest_config(cop0, config); | |
f5c236dd SL |
473 | |
474 | /* Read the cache characteristics from the host Config1 Register */ | |
475 | config1 = (read_c0_config1() & ~0x7f); | |
476 | ||
477 | /* Set up MMU size */ | |
478 | config1 &= ~(0x3f << 25); | |
479 | config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); | |
480 | ||
481 | /* We unset some bits that we aren't emulating */ | |
4e10b764 JH |
482 | config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC | |
483 | MIPS_CONF1_WR | MIPS_CONF1_CA); | |
f5c236dd SL |
484 | kvm_write_c0_guest_config1(cop0, config1); |
485 | ||
2211ee81 JH |
486 | /* Have config3, no tertiary/secondary caches implemented */ |
487 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); | |
488 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ | |
489 | ||
c771607a JH |
490 | /* Have config4, UserLocal */ |
491 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); | |
492 | ||
493 | /* Have config5 */ | |
494 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); | |
495 | ||
496 | /* No config6 */ | |
497 | kvm_write_c0_guest_config5(cop0, 0); | |
f5c236dd SL |
498 | |
499 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | |
500 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | |
501 | ||
d116e812 | 502 | /* |
92a76f6d | 503 | * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) |
d116e812 | 504 | */ |
f5c236dd SL |
505 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); |
506 | ||
507 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | |
37af2f30 JH |
508 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | |
509 | (vcpu_id & MIPS_EBASE_CPUNUM)); | |
f5c236dd SL |
510 | |
511 | return 0; | |
512 | } | |
513 | ||
f5c43bd4 JH |
/* Number of extra ONE_REG registers beyond the common set: none here. */
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
518 | ||
519 | static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, | |
520 | u64 __user *indices) | |
521 | { | |
522 | return 0; | |
523 | } | |
524 | ||
f8be02da JH |
525 | static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, |
526 | const struct kvm_one_reg *reg, | |
527 | s64 *v) | |
528 | { | |
529 | switch (reg->id) { | |
530 | case KVM_REG_MIPS_CP0_COUNT: | |
e30492bb | 531 | *v = kvm_mips_read_count(vcpu); |
f8be02da | 532 | break; |
f8239342 JH |
533 | case KVM_REG_MIPS_COUNT_CTL: |
534 | *v = vcpu->arch.count_ctl; | |
535 | break; | |
536 | case KVM_REG_MIPS_COUNT_RESUME: | |
537 | *v = ktime_to_ns(vcpu->arch.count_resume); | |
538 | break; | |
f74a8e22 JH |
539 | case KVM_REG_MIPS_COUNT_HZ: |
540 | *v = vcpu->arch.count_hz; | |
541 | break; | |
f8be02da JH |
542 | default: |
543 | return -EINVAL; | |
544 | } | |
545 | return 0; | |
546 | } | |
547 | ||
548 | static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |
549 | const struct kvm_one_reg *reg, | |
550 | s64 v) | |
551 | { | |
552 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
f8239342 | 553 | int ret = 0; |
c771607a | 554 | unsigned int cur, change; |
f8be02da JH |
555 | |
556 | switch (reg->id) { | |
557 | case KVM_REG_MIPS_CP0_COUNT: | |
e30492bb | 558 | kvm_mips_write_count(vcpu, v); |
f8be02da JH |
559 | break; |
560 | case KVM_REG_MIPS_CP0_COMPARE: | |
b45bacd2 | 561 | kvm_mips_write_compare(vcpu, v, false); |
e30492bb JH |
562 | break; |
563 | case KVM_REG_MIPS_CP0_CAUSE: | |
564 | /* | |
565 | * If the timer is stopped or started (DC bit) it must look | |
566 | * atomic with changes to the interrupt pending bits (TI, IRQ5). | |
567 | * A timer interrupt should not happen in between. | |
568 | */ | |
569 | if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { | |
570 | if (v & CAUSEF_DC) { | |
571 | /* disable timer first */ | |
572 | kvm_mips_count_disable_cause(vcpu); | |
573 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | |
574 | } else { | |
575 | /* enable timer last */ | |
576 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | |
577 | kvm_mips_count_enable_cause(vcpu); | |
578 | } | |
579 | } else { | |
580 | kvm_write_c0_guest_cause(cop0, v); | |
581 | } | |
f8be02da | 582 | break; |
c771607a JH |
583 | case KVM_REG_MIPS_CP0_CONFIG: |
584 | /* read-only for now */ | |
585 | break; | |
586 | case KVM_REG_MIPS_CP0_CONFIG1: | |
587 | cur = kvm_read_c0_guest_config1(cop0); | |
588 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); | |
589 | if (change) { | |
590 | v = cur ^ change; | |
591 | kvm_write_c0_guest_config1(cop0, v); | |
592 | } | |
593 | break; | |
594 | case KVM_REG_MIPS_CP0_CONFIG2: | |
595 | /* read-only for now */ | |
596 | break; | |
597 | case KVM_REG_MIPS_CP0_CONFIG3: | |
598 | cur = kvm_read_c0_guest_config3(cop0); | |
599 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); | |
600 | if (change) { | |
601 | v = cur ^ change; | |
602 | kvm_write_c0_guest_config3(cop0, v); | |
603 | } | |
604 | break; | |
605 | case KVM_REG_MIPS_CP0_CONFIG4: | |
606 | cur = kvm_read_c0_guest_config4(cop0); | |
607 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); | |
608 | if (change) { | |
609 | v = cur ^ change; | |
610 | kvm_write_c0_guest_config4(cop0, v); | |
611 | } | |
612 | break; | |
613 | case KVM_REG_MIPS_CP0_CONFIG5: | |
614 | cur = kvm_read_c0_guest_config5(cop0); | |
615 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); | |
616 | if (change) { | |
617 | v = cur ^ change; | |
618 | kvm_write_c0_guest_config5(cop0, v); | |
619 | } | |
620 | break; | |
f8239342 JH |
621 | case KVM_REG_MIPS_COUNT_CTL: |
622 | ret = kvm_mips_set_count_ctl(vcpu, v); | |
623 | break; | |
624 | case KVM_REG_MIPS_COUNT_RESUME: | |
625 | ret = kvm_mips_set_count_resume(vcpu, v); | |
626 | break; | |
f74a8e22 JH |
627 | case KVM_REG_MIPS_COUNT_HZ: |
628 | ret = kvm_mips_set_count_hz(vcpu, v); | |
629 | break; | |
f8be02da JH |
630 | default: |
631 | return -EINVAL; | |
632 | } | |
f8239342 | 633 | return ret; |
f8be02da JH |
634 | } |
635 | ||
b86ecb37 JH |
/*
 * Called before userspace reads vcpu state: relinquish the FPU so the
 * guest register state is up to date.
 */
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}
642 | ||
/* Nothing to do after userspace writes vcpu state. */
static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
647 | ||
f5c236dd SL |
648 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
649 | /* exit handlers */ | |
650 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | |
651 | .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, | |
652 | .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, | |
653 | .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, | |
654 | .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, | |
655 | .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, | |
656 | .handle_syscall = kvm_trap_emul_handle_syscall, | |
657 | .handle_res_inst = kvm_trap_emul_handle_res_inst, | |
658 | .handle_break = kvm_trap_emul_handle_break, | |
0a560427 | 659 | .handle_trap = kvm_trap_emul_handle_trap, |
c2537ed9 | 660 | .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, |
1c0cd66a | 661 | .handle_fpe = kvm_trap_emul_handle_fpe, |
98119ad5 | 662 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, |
f5c236dd SL |
663 | |
664 | .vm_init = kvm_trap_emul_vm_init, | |
665 | .vcpu_init = kvm_trap_emul_vcpu_init, | |
666 | .vcpu_setup = kvm_trap_emul_vcpu_setup, | |
667 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, | |
668 | .queue_timer_int = kvm_mips_queue_timer_int_cb, | |
669 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, | |
670 | .queue_io_int = kvm_mips_queue_io_int_cb, | |
671 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, | |
672 | .irq_deliver = kvm_mips_irq_deliver_cb, | |
673 | .irq_clear = kvm_mips_irq_clear_cb, | |
f5c43bd4 JH |
674 | .num_regs = kvm_trap_emul_num_regs, |
675 | .copy_reg_indices = kvm_trap_emul_copy_reg_indices, | |
f8be02da JH |
676 | .get_one_reg = kvm_trap_emul_get_one_reg, |
677 | .set_one_reg = kvm_trap_emul_set_one_reg, | |
b86ecb37 JH |
678 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, |
679 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, | |
f5c236dd SL |
680 | }; |
681 | ||
682 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | |
683 | { | |
684 | *install_callbacks = &kvm_trap_emul_callbacks; | |
685 | return 0; | |
686 | } |