/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the vcpu is executing in a branch
 * delay slot.
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
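			/* Link register rd gets the address after the delay slot */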
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

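			/* bposge32 is taken when the DSPControl pos field >= 32 */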
			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
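		/* Fall through */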
	case j_op:
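		/* Keep the top 4 bits (256 MB region) of the delay-slot PC */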
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction, using the saved
 * BadInstr register value if it exists, otherwise falling back to reading guest
 * memory at @opc.
 *
 * Returns:	0 on success, with the instruction encoding written to @out,
 *		otherwise an error code if guest memory could not be read.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output for the instruction encoding.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted), using the saved BadInstrP register
 * value if it exists, otherwise falling back to reading guest memory at @opc.
 *
 * Returns:	0 on success, with the instruction encoding written to @out,
 *		otherwise an error code if guest memory could not be read.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		return kvm_get_inst(opc, vcpu, out);
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, handling the case
 * where a timer interrupt is pending but hasn't yet been serviced.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
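	/* Signed difference handles modulo-2^32 wrap of Count past Compare */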
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
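	/*
	 * (u32)(compare - count - 1) + 1 maps a zero difference to a full
	 * 2^32-count timeout rather than an immediate expiry.
	 */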
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
	 * time to jump forwards a little, within reason. If the drift is too
	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
	 */
	drift = count - before_count;
	if (drift < min_drift) {
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going
 * if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
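	/* count_period: time in ns for the 32-bit count to wrap (2^32 / Hz) */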
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set prior to it being written into the guest context. We disable
	 * preemption until the new value is written to prevent restore of a
	 * GTOffset corresponding to the old CP0_Compare value.
	 */
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		if (delta > 0)
			preempt_enable();

		back_to_back_c0_hazard();

		if (!ack && cause & CAUSEF_TI)
			kvm_write_c0_guest_cause(cop0, cause);
	}

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
	 * until after the new CP0_Compare is written, otherwise new guest
	 * CP0_Count could hit new guest CP0_Compare.
	 */
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
				    unsigned long entryhi)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu, i;
	u32 nasid = entryhi & KVM_ENTRYHI_ASID;

	if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID, nasid);

		/*
		 * Flush entries from the GVA page tables.
		 * Guest user page table will get flushed lazily on re-entry to
		 * guest user if the guest ASID actually changes.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);

		/*
		 * Regenerate/invalidate kernel MMU context.
		 * The user MMU context will be regenerated lazily on re-entry
		 * to guest user if the guest ASID actually changes.
		 */
		preempt_disable();
		cpu = smp_processor_id();
		get_new_mmu_context(kern_mm, cpu);
		for_each_possible_cpu(i)
			if (i != cpu)
				cpu_context(i, kern_mm) = 0;
		preempt_enable();
	}
	kvm_write_c0_guest_entryhi(cop0, entryhi);
}

enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	unsigned long pc = vcpu->arch.pc;
	int index;

	index = kvm_read_c0_guest_index(cop0);
	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		/* UNDEFINED */
		kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
		index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
	kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
	kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);

	return EMULATE_DONE;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:	VCPU with changed mappings.
 * @tlb:	TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
					  struct kvm_mips_tlb *tlb)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu, i;
	bool user;

	/* No need to flush for entries which are already invalid */
	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
		return;
	/* Don't touch host kernel page tables or TLB mappings */
	if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
		return;
	/* User address space doesn't need flushing for KSeg2/3 changes */
	user = tlb->tlb_hi < KVM_GUEST_KSEG0;

	preempt_disable();

	/* Invalidate page table entries */
	kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);

	/* Invalidate the whole ASID on other CPUs */
	cpu = smp_processor_id();
	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;
		if (user)
			cpu_context(i, user_mm) = 0;
		cpu_context(i, kern_mm) = 0;
	}

	preempt_enable();
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
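		/* UNDEFINED: clear the sign bit so the modulus is in range */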
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;
	int index;

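	/* Pick a random victim index instead of modelling guest CP0_Random */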
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	unsigned long pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 and ULRI are optional */
	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* KScrExist */
	mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;

	return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		case hypcall_op:
			er = kvm_mips_emul_hypcall(vcpu, inst);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1322 | vcpu->arch.gprs[rt] = |
1323 | (s32)kvm_mips_read_count(vcpu); | |
e685c689 SL |
1324 | } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { |
1325 | vcpu->arch.gprs[rt] = 0x0; | |
1326 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1327 | kvm_mips_trans_mfc0(inst, opc, vcpu); | |
1328 | #endif | |
d116e812 | 1329 | } else { |
172e02d1 | 1330 | vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; |
e685c689 SL |
1331 | |
1332 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1333 | kvm_mips_trans_mfc0(inst, opc, vcpu); | |
1334 | #endif | |
1335 | } | |
1336 | ||
6398da13 JH |
1337 | trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, |
1338 | KVM_TRACE_COP0(rd, sel), | |
1339 | vcpu->arch.gprs[rt]); | |
e685c689 SL |
1340 | break; |
1341 | ||
1342 | case dmfc_op: | |
1343 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | |
6398da13 JH |
1344 | |
1345 | trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, | |
1346 | KVM_TRACE_COP0(rd, sel), | |
1347 | vcpu->arch.gprs[rt]); | |
e685c689 SL |
1348 | break; |
1349 | ||
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);

			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/*
				 * Preserve core number, and keep the exception
				 * base in guest KSeg0.
				 */
				kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
							  vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				kvm_mips_change_entryhi(vcpu,
							vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
e30492bb | 1496 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
8cffd197 | 1497 | u32 old_cause, new_cause; |
d116e812 | 1498 | |
e30492bb JH |
1499 | old_cause = kvm_read_c0_guest_cause(cop0); |
1500 | new_cause = vcpu->arch.gprs[rt]; | |
1501 | /* Update R/W bits */ | |
1502 | kvm_change_c0_guest_cause(cop0, 0x08800300, | |
1503 | new_cause); | |
1504 | /* DC bit enabling/disabling timer? */ | |
1505 | if ((old_cause ^ new_cause) & CAUSEF_DC) { | |
1506 | if (new_cause & CAUSEF_DC) | |
1507 | kvm_mips_count_disable_cause(vcpu); | |
1508 | else | |
1509 | kvm_mips_count_enable_cause(vcpu); | |
1510 | } | |
cef061d0 JH |
1511 | } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { |
1512 | u32 mask = MIPS_HWRENA_CPUNUM | | |
1513 | MIPS_HWRENA_SYNCISTEP | | |
1514 | MIPS_HWRENA_CC | | |
1515 | MIPS_HWRENA_CCRES; | |
1516 | ||
1517 | if (kvm_read_c0_guest_config3(cop0) & | |
1518 | MIPS_CONF3_ULRI) | |
1519 | mask |= MIPS_HWRENA_ULR; | |
1520 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; | |
e685c689 SL |
1521 | } else { |
1522 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; | |
1523 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1524 | kvm_mips_trans_mtc0(inst, opc, vcpu); | |
1525 | #endif | |
1526 | } | |
e685c689 SL |
1527 | break; |
1528 | ||
1529 | case dmtc_op: | |
6ad78a5c DCZ |
1530 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", |
1531 | vcpu->arch.pc, rt, rd, sel); | |
6398da13 JH |
1532 | trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, |
1533 | KVM_TRACE_COP0(rd, sel), | |
1534 | vcpu->arch.gprs[rt]); | |
e685c689 SL |
1535 | er = EMULATE_FAIL; |
1536 | break; | |
1537 | ||
b2c59635 | 1538 | case mfmc0_op: |
e685c689 SL |
1539 | #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS |
1540 | cop0->stat[MIPS_CP0_STATUS][0]++; | |
1541 | #endif | |
caa1faa7 | 1542 | if (rt != 0) |
e685c689 SL |
1543 | vcpu->arch.gprs[rt] = |
1544 | kvm_read_c0_guest_status(cop0); | |
e685c689 | 1545 | /* EI */ |
258f3a2e | 1546 | if (inst.mfmc0_format.sc) { |
b2c59635 | 1547 | kvm_debug("[%#lx] mfmc0_op: EI\n", |
e685c689 SL |
1548 | vcpu->arch.pc); |
1549 | kvm_set_c0_guest_status(cop0, ST0_IE); | |
1550 | } else { | |
b2c59635 | 1551 | kvm_debug("[%#lx] mfmc0_op: DI\n", |
e685c689 SL |
1552 | vcpu->arch.pc); |
1553 | kvm_clear_c0_guest_status(cop0, ST0_IE); | |
1554 | } | |
1555 | ||
1556 | break; | |
1557 | ||
1558 | case wrpgpr_op: | |
1559 | { | |
8cffd197 JH |
1560 | u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; |
1561 | u32 pss = | |
e685c689 | 1562 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; |
d116e812 DCZ |
1563 | /* |
1564 | * We don't support any shadow register sets, so | |
1565 | * SRSCtl[PSS] == SRSCtl[CSS] == 0 |
1566 | */ | |
e685c689 SL |
1567 | if (css || pss) { |
1568 | er = EMULATE_FAIL; | |
1569 | break; | |
1570 | } | |
1571 | kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, | |
1572 | vcpu->arch.gprs[rt]); | |
1573 | vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; | |
1574 | } | |
1575 | break; | |
1576 | default: | |
6ad78a5c | 1577 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", |
258f3a2e | 1578 | vcpu->arch.pc, inst.c0r_format.rs); |
e685c689 SL |
1579 | er = EMULATE_FAIL; |
1580 | break; | |
1581 | } | |
1582 | } | |
1583 | ||
1584 | done: | |
d116e812 DCZ |
1585 | /* Rollback PC only if emulation was unsuccessful */ |
1586 | if (er == EMULATE_FAIL) | |
e685c689 | 1587 | vcpu->arch.pc = curr_pc; |
e685c689 SL |
1588 | |
1589 | dont_update_pc: | |
1590 | /* | |
1591 | * This is for special instructions whose emulation | |
1592 | * updates the PC, so do not overwrite the PC under | |
1593 | * any circumstances | |
1594 | */ | |
1595 | ||
1596 | return er; | |
1597 | } | |
1598 | ||
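/*
 * MMIO store emulation. The access cannot be completed in the kernel, so
 * the data and guest physical address are staged in the kvm_run mmio
 * block and EMULATE_DO_MMIO is returned; the caller then exits to
 * userspace (e.g. QEMU), which performs the actual device write before
 * the vcpu is resumed.
 */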
258f3a2e JH |
1599 | enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, |
1600 | u32 cause, | |
d116e812 DCZ |
1601 | struct kvm_run *run, |
1602 | struct kvm_vcpu *vcpu) | |
e685c689 | 1603 | { |
8b48d5b7 | 1604 | enum emulation_result er; |
258f3a2e | 1605 | u32 rt; |
e685c689 SL |
1606 | void *data = run->mmio.data; |
1607 | unsigned long curr_pc; | |
1608 | ||
1609 | /* | |
1610 | * Update PC and hold onto current PC in case there is | |
1611 | * an error and we want to roll back the PC |
1612 | */ | |
1613 | curr_pc = vcpu->arch.pc; | |
1614 | er = update_pc(vcpu, cause); | |
1615 | if (er == EMULATE_FAIL) | |
1616 | return er; | |
1617 | ||
258f3a2e | 1618 | rt = inst.i_format.rt; |
e685c689 | 1619 | |
8b48d5b7 JH |
1620 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1621 | vcpu->arch.host_cp0_badvaddr); | |
1622 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) | |
1623 | goto out_fail; | |
e685c689 | 1624 | |
8b48d5b7 | 1625 | switch (inst.i_format.opcode) { |
59d7814a JH |
1626 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) |
1627 | case sd_op: | |
1628 | run->mmio.len = 8; | |
1629 | *(u64 *)data = vcpu->arch.gprs[rt]; | |
1630 | ||
1631 | kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n", | |
1632 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
1633 | vcpu->arch.gprs[rt], *(u64 *)data); | |
1634 | break; | |
1635 | #endif | |
1636 | ||
e685c689 | 1637 | case sw_op: |
8b48d5b7 JH |
1638 | run->mmio.len = 4; |
1639 | *(u32 *)data = vcpu->arch.gprs[rt]; | |
e685c689 SL |
1640 | |
1641 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1642 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
8b48d5b7 | 1643 | vcpu->arch.gprs[rt], *(u32 *)data); |
e685c689 SL |
1644 | break; |
1645 | ||
1646 | case sh_op: | |
8b48d5b7 JH |
1647 | run->mmio.len = 2; |
1648 | *(u16 *)data = vcpu->arch.gprs[rt]; | |
e685c689 SL |
1649 | |
1650 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1651 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
8b48d5b7 JH |
1652 | vcpu->arch.gprs[rt], *(u16 *)data); |
1653 | break; | |
1654 | ||
1655 | case sb_op: | |
1656 | run->mmio.len = 1; | |
1657 | *(u8 *)data = vcpu->arch.gprs[rt]; | |
1658 | ||
1659 | kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1660 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
1661 | vcpu->arch.gprs[rt], *(u8 *)data); | |
e685c689 SL |
1662 | break; |
1663 | ||
1664 | default: | |
d86c1ebe | 1665 | kvm_err("Store not yet supported (inst=0x%08x)\n", |
258f3a2e | 1666 | inst.word); |
8b48d5b7 | 1667 | goto out_fail; |
e685c689 SL |
1668 | } |
1669 | ||
8b48d5b7 JH |
1670 | run->mmio.is_write = 1; |
1671 | vcpu->mmio_needed = 1; | |
1672 | vcpu->mmio_is_write = 1; | |
1673 | return EMULATE_DO_MMIO; | |
e685c689 | 1674 | |
8b48d5b7 JH |
1675 | out_fail: |
1676 | /* Rollback PC if emulation was unsuccessful */ | |
1677 | vcpu->arch.pc = curr_pc; | |
1678 | return EMULATE_FAIL; | |
e685c689 SL |
1679 | } |
1680 | ||
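/*
 * MMIO load emulation. Unlike a store, a load only completes once
 * userspace has supplied the data, so the destination register (io_gpr)
 * and the resume PC (io_pc) are saved here and the result is written
 * back later by kvm_mips_complete_mmio_load().
 */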
258f3a2e JH |
1681 | enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, |
1682 | u32 cause, struct kvm_run *run, | |
d116e812 | 1683 | struct kvm_vcpu *vcpu) |
e685c689 | 1684 | { |
8b48d5b7 | 1685 | enum emulation_result er; |
e1e575f6 | 1686 | unsigned long curr_pc; |
258f3a2e | 1687 | u32 op, rt; |
e685c689 | 1688 | |
258f3a2e JH |
1689 | rt = inst.i_format.rt; |
1690 | op = inst.i_format.opcode; | |
e685c689 | 1691 | |
e1e575f6 JH |
1692 | /* |
1693 | * Find the resume PC now while we have safe and easy access to the | |
1694 | * prior branch instruction, and save it for | |
1695 | * kvm_mips_complete_mmio_load() to restore later. | |
1696 | */ | |
1697 | curr_pc = vcpu->arch.pc; | |
1698 | er = update_pc(vcpu, cause); | |
1699 | if (er == EMULATE_FAIL) | |
1700 | return er; | |
1701 | vcpu->arch.io_pc = vcpu->arch.pc; | |
1702 | vcpu->arch.pc = curr_pc; | |
1703 | ||
e685c689 SL |
1704 | vcpu->arch.io_gpr = rt; |
1705 | ||
8b48d5b7 JH |
1706 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( |
1707 | vcpu->arch.host_cp0_badvaddr); | |
1708 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) | |
1709 | return EMULATE_FAIL; | |
1710 | ||
1711 | vcpu->mmio_needed = 2; /* signed */ | |
e685c689 | 1712 | switch (op) { |
59d7814a JH |
1713 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) |
1714 | case ld_op: | |
1715 | run->mmio.len = 8; | |
1716 | break; | |
1717 | ||
1718 | case lwu_op: | |
1719 | vcpu->mmio_needed = 1; /* unsigned */ | |
1720 | /* fall through */ | |
1721 | #endif | |
e685c689 | 1722 | case lw_op: |
8b48d5b7 | 1723 | run->mmio.len = 4; |
e685c689 SL |
1724 | break; |
1725 | ||
e685c689 | 1726 | case lhu_op: |
8b48d5b7 JH |
1727 | vcpu->mmio_needed = 1; /* unsigned */ |
1728 | /* fall through */ | |
1729 | case lh_op: | |
1730 | run->mmio.len = 2; | |
e685c689 SL |
1731 | break; |
1732 | ||
1733 | case lbu_op: | |
8b48d5b7 JH |
1734 | vcpu->mmio_needed = 1; /* unsigned */ |
1735 | /* fall through */ | |
e685c689 | 1736 | case lb_op: |
8b48d5b7 | 1737 | run->mmio.len = 1; |
e685c689 SL |
1738 | break; |
1739 | ||
1740 | default: | |
d86c1ebe | 1741 | kvm_err("Load not yet supported (inst=0x%08x)\n", |
258f3a2e | 1742 | inst.word); |
8b48d5b7 JH |
1743 | vcpu->mmio_needed = 0; |
1744 | return EMULATE_FAIL; | |
e685c689 SL |
1745 | } |
1746 | ||
8b48d5b7 JH |
1747 | run->mmio.is_write = 0; |
1748 | vcpu->mmio_is_write = 0; | |
1749 | return EMULATE_DO_MMIO; | |
e685c689 SL |
1750 | } |
1751 | ||
60c7aa33 | 1752 | #ifndef CONFIG_KVM_MIPS_VZ |
4cf74c9c JH |
1753 | static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), |
1754 | unsigned long curr_pc, | |
1755 | unsigned long addr, | |
1756 | struct kvm_run *run, | |
1757 | struct kvm_vcpu *vcpu, | |
1758 | u32 cause) | |
1759 | { | |
1760 | int err; | |
1761 | ||
1762 | for (;;) { | |
1763 | /* Carefully attempt the cache operation */ | |
1764 | kvm_trap_emul_gva_lockless_begin(vcpu); | |
1765 | err = fn(addr); | |
1766 | kvm_trap_emul_gva_lockless_end(vcpu); | |
1767 | ||
1768 | if (likely(!err)) | |
1769 | return EMULATE_DONE; | |
1770 | ||
1771 | /* | |
1772 | * Try to handle the fault and retry; maybe we just raced with a |
1773 | * GVA invalidation. | |
1774 | */ | |
1775 | switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { | |
1776 | case KVM_MIPS_GVA: | |
1777 | case KVM_MIPS_GPA: | |
1778 | /* bad virtual or physical address */ | |
1779 | return EMULATE_FAIL; | |
1780 | case KVM_MIPS_TLB: | |
1781 | /* no matching guest TLB */ | |
1782 | vcpu->arch.host_cp0_badvaddr = addr; | |
1783 | vcpu->arch.pc = curr_pc; | |
1784 | kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu); | |
1785 | return EMULATE_EXCEPT; | |
1786 | case KVM_MIPS_TLBINV: | |
1787 | /* invalid matching guest TLB */ | |
1788 | vcpu->arch.host_cp0_badvaddr = addr; | |
1789 | vcpu->arch.pc = curr_pc; | |
1790 | kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu); | |
1791 | return EMULATE_EXCEPT; | |
1792 | default: | |
1793 | break; | |
1794 | } |
1795 | } | |
1796 | } | |
1797 | ||
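/*
 * The CACHE instruction's 5-bit op field (carried in rt) encodes the
 * target cache in its low 2 bits (0 = I, 1 = D) and the operation in its
 * upper 3 bits; CacheOp_Cache and CacheOp_Op mask out the two parts.
 * e.g. Hit_Writeback_Inv_D (0x15): cache = 0x15 & CacheOp_Cache = 1
 * (D-cache), op = 0x15 & CacheOp_Op = 0x14 (hit writeback invalidate).
 */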
258f3a2e JH |
1798 | enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, |
1799 | u32 *opc, u32 cause, | |
d116e812 DCZ |
1800 | struct kvm_run *run, |
1801 | struct kvm_vcpu *vcpu) | |
e685c689 | 1802 | { |
e685c689 | 1803 | enum emulation_result er = EMULATE_DONE; |
8cffd197 JH |
1804 | u32 cache, op_inst, op, base; |
1805 | s16 offset; | |
e685c689 SL |
1806 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1807 | unsigned long va; | |
1808 | unsigned long curr_pc; | |
1809 | ||
1810 | /* | |
1811 | * Update PC and hold onto current PC in case there is | |
1812 | * an error and we want to roll back the PC |
1813 | */ | |
1814 | curr_pc = vcpu->arch.pc; | |
1815 | er = update_pc(vcpu, cause); | |
1816 | if (er == EMULATE_FAIL) | |
1817 | return er; | |
1818 | ||
258f3a2e JH |
1819 | base = inst.i_format.rs; |
1820 | op_inst = inst.i_format.rt; | |
5cc4aafc JH |
1821 | if (cpu_has_mips_r6) |
1822 | offset = inst.spec3_format.simmediate; | |
1823 | else | |
1824 | offset = inst.i_format.simmediate; | |
f4956f62 JH |
1825 | cache = op_inst & CacheOp_Cache; |
1826 | op = op_inst & CacheOp_Op; | |
e685c689 SL |
1827 | |
1828 | va = arch->gprs[base] + offset; | |
1829 | ||
1830 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", |
1831 | cache, op, base, arch->gprs[base], offset); | |
1832 | ||
d116e812 DCZ |
1833 | /* |
1834 | * Treat INDEX_INV as a nop; Linux issues it on startup to |
1835 | * invalidate the caches entirely by stepping through all the |
1836 | * ways/indexes. |
e685c689 | 1837 | */ |
f4956f62 | 1838 | if (op == Index_Writeback_Inv) { |
d116e812 DCZ |
1839 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", |
1840 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | |
1841 | arch->gprs[base], offset); | |
e685c689 | 1842 | |
4fa9de5a JH |
1843 | if (cache == Cache_D) { |
1844 | #ifdef CONFIG_CPU_R4K_CACHE_TLB | |
e685c689 | 1845 | r4k_blast_dcache(); |
4fa9de5a JH |
1846 | #else |
1847 | switch (boot_cpu_type()) { | |
1848 | case CPU_CAVIUM_OCTEON3: | |
1849 | /* locally flush icache */ | |
1850 | local_flush_icache_range(0, 0); | |
1851 | break; | |
1852 | default: | |
1853 | __flush_cache_all(); | |
1854 | break; | |
1855 | } | |
1856 | #endif | |
1857 | } else if (cache == Cache_I) { | |
1858 | #ifdef CONFIG_CPU_R4K_CACHE_TLB | |
e685c689 | 1859 | r4k_blast_icache(); |
4fa9de5a JH |
1860 | #else |
1861 | switch (boot_cpu_type()) { | |
1862 | case CPU_CAVIUM_OCTEON3: | |
1863 | /* locally flush icache */ | |
1864 | local_flush_icache_range(0, 0); | |
1865 | break; | |
1866 | default: | |
1867 | flush_icache_all(); | |
1868 | break; | |
1869 | } | |
1870 | #endif | |
1871 | } else { | |
6ad78a5c DCZ |
1872 | kvm_err("%s: unsupported CACHE INDEX operation\n", |
1873 | __func__); | |
e685c689 SL |
1874 | return EMULATE_FAIL; |
1875 | } | |
1876 | ||
1877 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1878 | kvm_mips_trans_cache_index(inst, opc, vcpu); | |
1879 | #endif | |
1880 | goto done; | |
1881 | } | |
1882 | ||
e685c689 | 1883 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ |
f4956f62 | 1884 | if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { |
4cf74c9c JH |
1885 | /* |
1886 | * Perform the dcache part of icache synchronisation on the | |
1887 | * guest's behalf. | |
1888 | */ | |
1889 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, | |
1890 | curr_pc, va, run, vcpu, cause); | |
1891 | if (er != EMULATE_DONE) | |
1892 | goto done; | |
e685c689 | 1893 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
d116e812 DCZ |
1894 | /* |
1895 | * Replace the CACHE instruction with a SYNCI; not identical, |
1896 | * but it avoids a trap |
1897 | */ | |
e685c689 SL |
1898 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1899 | #endif | |
f4956f62 | 1900 | } else if (op_inst == Hit_Invalidate_I) { |
4cf74c9c JH |
1901 | /* Perform the icache synchronisation on the guest's behalf */ |
1902 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, | |
1903 | curr_pc, va, run, vcpu, cause); | |
1904 | if (er != EMULATE_DONE) | |
1905 | goto done; | |
1906 | er = kvm_mips_guest_cache_op(protected_flush_icache_line, | |
1907 | curr_pc, va, run, vcpu, cause); | |
1908 | if (er != EMULATE_DONE) | |
1909 | goto done; | |
e685c689 SL |
1910 | |
1911 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1912 | /* Replace the CACHE instruction with a SYNCI */ |
1913 | kvm_mips_trans_cache_va(inst, opc, vcpu); | |
1914 | #endif | |
1915 | } else { | |
6ad78a5c DCZ |
1916 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", |
1917 | cache, op, base, arch->gprs[base], offset); | |
e685c689 | 1918 | er = EMULATE_FAIL; |
e685c689 SL |
1919 | } |
1920 | ||
cc81e948 JH |
1921 | done: |
1922 | /* Rollback PC only if emulation was unsuccessful */ | |
1923 | if (er == EMULATE_FAIL) | |
1924 | vcpu->arch.pc = curr_pc; | |
4cf74c9c JH |
1925 | /* Guest exception needs guest to resume */ |
1926 | if (er == EMULATE_EXCEPT) | |
1927 | er = EMULATE_DONE; | |
cc81e948 | 1928 | |
e685c689 SL |
1929 | return er; |
1930 | } | |
1931 | ||
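/*
 * Top-level instruction emulation dispatch for trap & emulate. Note that
 * MIPSr6 moved the CACHE instruction into the SPEC3 opcode space
 * (cache6_op), hence the two dispatch paths below.
 */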
31cf7498 | 1932 | enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, |
d116e812 DCZ |
1933 | struct kvm_run *run, |
1934 | struct kvm_vcpu *vcpu) | |
e685c689 | 1935 | { |
258f3a2e | 1936 | union mips_instruction inst; |
e685c689 | 1937 | enum emulation_result er = EMULATE_DONE; |
122e51d4 | 1938 | int err; |
e685c689 | 1939 | |
d116e812 DCZ |
1940 | /* Fetch the instruction. */ |
1941 | if (cause & CAUSEF_BD) | |
e685c689 | 1942 | opc += 1; |
6a97c775 | 1943 | err = kvm_get_badinstr(opc, vcpu, &inst.word); |
122e51d4 JH |
1944 | if (err) |
1945 | return EMULATE_FAIL; | |
e685c689 | 1946 | |
258f3a2e | 1947 | switch (inst.r_format.opcode) { |
e685c689 SL |
1948 | case cop0_op: |
1949 | er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); | |
1950 | break; | |
e685c689 | 1951 | |
5cc4aafc | 1952 | #ifndef CONFIG_CPU_MIPSR6 |
e685c689 SL |
1953 | case cache_op: |
1954 | ++vcpu->stat.cache_exits; | |
1e09e86a | 1955 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); |
e685c689 SL |
1956 | er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); |
1957 | break; | |
5cc4aafc JH |
1958 | #else |
1959 | case spec3_op: | |
1960 | switch (inst.spec3_format.func) { | |
1961 | case cache6_op: | |
1962 | ++vcpu->stat.cache_exits; | |
1963 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); | |
1964 | er = kvm_mips_emulate_cache(inst, opc, cause, run, | |
1965 | vcpu); | |
1966 | break; | |
1967 | default: | |
1968 | goto unknown; | |
1969 | } |
1970 | break; | |
1971 | unknown: | |
1972 | #endif | |
e685c689 SL |
1973 | |
1974 | default: | |
6ad78a5c | 1975 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, |
258f3a2e | 1976 | inst.word); |
e685c689 SL |
1977 | kvm_arch_vcpu_dump_regs(vcpu); |
1978 | er = EMULATE_FAIL; | |
1979 | break; | |
1980 | } | |
1981 | ||
1982 | return er; | |
1983 | } | |
60c7aa33 | 1984 | #endif /* CONFIG_KVM_MIPS_VZ */ |
e685c689 | 1985 | |
7801bbe1 JH |
1986 | /** |
1987 | * kvm_mips_guest_exception_base() - Find guest exception vector base address. | |
1988 | * | |
1989 | * Returns: The base address of the current guest exception vector, taking | |
1990 | * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. | |
1991 | */ | |
1992 | long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) | |
1993 | { | |
1994 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1995 | ||
1996 | if (kvm_read_c0_guest_status(cop0) & ST0_BEV) | |
1997 | return KVM_GUEST_CKSEG1ADDR(0x1fc00200); | |
1998 | else | |
1999 | return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; | |
2000 | } | |
2001 | ||
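/*
 * The exception delivery routines below add the architectural vector
 * offset to this base: +0x000 for a TLB refill taken with Status.EXL
 * clear, and +0x180 for the general exception vector (also used for any
 * exception taken with Status.EXL already set).
 */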
31cf7498 | 2002 | enum emulation_result kvm_mips_emulate_syscall(u32 cause, |
bdb7ed86 | 2003 | u32 *opc, |
d116e812 DCZ |
2004 | struct kvm_run *run, |
2005 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2006 | { |
2007 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2008 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2009 | enum emulation_result er = EMULATE_DONE; | |
2010 | ||
2011 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2012 | /* save old pc */ | |
2013 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2014 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2015 | ||
2016 | if (cause & CAUSEF_BD) | |
2017 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2018 | else | |
2019 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2020 | ||
2021 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); | |
2022 | ||
2023 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2024 | (EXCCODE_SYS << CAUSEB_EXCCODE)); |
e685c689 SL |
2025 | |
2026 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2027 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2028 | |
2029 | } else { | |
6ad78a5c | 2030 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
e685c689 SL |
2031 | er = EMULATE_FAIL; |
2032 | } | |
2033 | ||
2034 | return er; | |
2035 | } | |
2036 | ||
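/*
 * Each TLB exception delivery routine below mirrors what real hardware
 * does: save the faulting PC in guest EPC, set Status.EXL, record the
 * branch delay state in Cause.BD, set the ExcCode, and load
 * BadVAddr/EntryHi with the faulting address before jumping to the
 * guest's vector.
 */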
31cf7498 | 2037 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, |
bdb7ed86 | 2038 | u32 *opc, |
d116e812 DCZ |
2039 | struct kvm_run *run, |
2040 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2041 | { |
2042 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2043 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 2044 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
ca64c2be | 2045 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); |
e685c689 SL |
2046 | |
2047 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2048 | /* save old pc */ | |
2049 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2050 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2051 | ||
2052 | if (cause & CAUSEF_BD) | |
2053 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2054 | else | |
2055 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2056 | ||
2057 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", | |
2058 | arch->pc); | |
2059 | ||
2060 | /* set pc to the exception entry point */ | |
7801bbe1 | 2061 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; |
e685c689 SL |
2062 | |
2063 | } else { | |
2064 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | |
2065 | arch->pc); | |
2066 | ||
7801bbe1 | 2067 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2068 | } |
2069 | ||
2070 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2071 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); |
e685c689 SL |
2072 | |
2073 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2074 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2075 | /* XXXKYMA: is the context register used by linux??? */ | |
2076 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
e685c689 | 2077 | |
d98403a5 | 2078 | return EMULATE_DONE; |
e685c689 SL |
2079 | } |
2080 | ||
31cf7498 | 2081 | enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, |
bdb7ed86 | 2082 | u32 *opc, |
d116e812 DCZ |
2083 | struct kvm_run *run, |
2084 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2085 | { |
2086 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2087 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 SL |
2088 | unsigned long entryhi = |
2089 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
ca64c2be | 2090 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); |
e685c689 SL |
2091 | |
2092 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2093 | /* save old pc */ | |
2094 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2095 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2096 | ||
2097 | if (cause & CAUSEF_BD) | |
2098 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2099 | else | |
2100 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2101 | ||
2102 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | |
2103 | arch->pc); | |
e685c689 SL |
2104 | } else { |
2105 | kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n", |
2106 | arch->pc); | |
e685c689 SL |
2107 | } |
2108 | ||
7801bbe1 JH |
2109 | /* set pc to the exception entry point */ |
2110 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | |
2111 | ||
e685c689 | 2112 | kvm_change_c0_guest_cause(cop0, (0xff), |
16d100db | 2113 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); |
e685c689 SL |
2114 | |
2115 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2116 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2117 | /* XXXKYMA: is the context register used by linux??? */ | |
2118 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
e685c689 | 2119 | |
d98403a5 | 2120 | return EMULATE_DONE; |
e685c689 SL |
2121 | } |
2122 | ||
31cf7498 | 2123 | enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, |
bdb7ed86 | 2124 | u32 *opc, |
d116e812 DCZ |
2125 | struct kvm_run *run, |
2126 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2127 | { |
2128 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2129 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 2130 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
ca64c2be | 2131 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); |
e685c689 SL |
2132 | |
2133 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2134 | /* save old pc */ | |
2135 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2136 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2137 | ||
2138 | if (cause & CAUSEF_BD) | |
2139 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2140 | else | |
2141 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2142 | ||
2143 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | |
2144 | arch->pc); | |
2145 | ||
2146 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2147 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; |
e685c689 SL |
2148 | } else { |
2149 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | |
2150 | arch->pc); | |
7801bbe1 | 2151 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2152 | } |
2153 | ||
2154 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2155 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); |
e685c689 SL |
2156 | |
2157 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2158 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2159 | /* XXXKYMA: is the context register used by linux??? */ | |
2160 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
e685c689 | 2161 | |
d98403a5 | 2162 | return EMULATE_DONE; |
e685c689 SL |
2163 | } |
2164 | ||
31cf7498 | 2165 | enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, |
bdb7ed86 | 2166 | u32 *opc, |
d116e812 DCZ |
2167 | struct kvm_run *run, |
2168 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2169 | { |
2170 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2171 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 2172 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
ca64c2be | 2173 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); |
e685c689 SL |
2174 | |
2175 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2176 | /* save old pc */ | |
2177 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2178 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2179 | ||
2180 | if (cause & CAUSEF_BD) | |
2181 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2182 | else | |
2183 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2184 | ||
2185 | kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n", |
2186 | arch->pc); | |
e685c689 SL |
2187 | } else { |
2188 | kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n", |
2189 | arch->pc); | |
e685c689 SL |
2190 | } |
2191 | ||
7801bbe1 JH |
2192 | /* Set PC to the exception entry point */ |
2193 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | |
2194 | ||
e685c689 | 2195 | kvm_change_c0_guest_cause(cop0, (0xff), |
16d100db | 2196 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); |
e685c689 SL |
2197 | |
2198 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2199 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2200 | /* XXXKYMA: is the context register used by linux??? */ | |
2201 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
e685c689 | 2202 | |
d98403a5 | 2203 | return EMULATE_DONE; |
e685c689 SL |
2204 | } |
2205 | ||
31cf7498 | 2206 | enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, |
bdb7ed86 | 2207 | u32 *opc, |
d116e812 DCZ |
2208 | struct kvm_run *run, |
2209 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2210 | { |
2211 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2212 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
ca64c2be | 2213 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); |
e685c689 | 2214 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
e685c689 SL |
2215 | |
2216 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2217 | /* save old pc */ | |
2218 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2219 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2220 | ||
2221 | if (cause & CAUSEF_BD) | |
2222 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2223 | else | |
2224 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2225 | ||
2226 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | |
2227 | arch->pc); | |
e685c689 SL |
2228 | } else { |
2229 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | |
2230 | arch->pc); | |
e685c689 SL |
2231 | } |
2232 | ||
7801bbe1 JH |
2233 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
2234 | ||
16d100db JH |
2235 | kvm_change_c0_guest_cause(cop0, (0xff), |
2236 | (EXCCODE_MOD << CAUSEB_EXCCODE)); | |
e685c689 SL |
2237 | |
2238 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2239 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2240 | /* XXXKYMA: is the context register used by linux??? */ | |
2241 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
e685c689 | 2242 | |
d98403a5 | 2243 | return EMULATE_DONE; |
e685c689 SL |
2244 | } |
2245 | ||
31cf7498 | 2246 | enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, |
bdb7ed86 | 2247 | u32 *opc, |
d116e812 DCZ |
2248 | struct kvm_run *run, |
2249 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2250 | { |
2251 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2252 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 SL |
2253 | |
2254 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2255 | /* save old pc */ | |
2256 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2257 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2258 | ||
2259 | if (cause & CAUSEF_BD) | |
2260 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2261 | else | |
2262 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2263 | ||
2264 | } | |
2265 | ||
7801bbe1 | 2266 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2267 | |
2268 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2269 | (EXCCODE_CPU << CAUSEB_EXCCODE)); |
e685c689 SL |
2270 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); |
2271 | ||
d98403a5 | 2272 | return EMULATE_DONE; |
e685c689 SL |
2273 | } |
2274 | ||
31cf7498 | 2275 | enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, |
bdb7ed86 | 2276 | u32 *opc, |
d116e812 DCZ |
2277 | struct kvm_run *run, |
2278 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2279 | { |
2280 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2281 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2282 | enum emulation_result er = EMULATE_DONE; | |
2283 | ||
2284 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2285 | /* save old pc */ | |
2286 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2287 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2288 | ||
2289 | if (cause & CAUSEF_BD) | |
2290 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2291 | else | |
2292 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2293 | ||
2294 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); | |
2295 | ||
2296 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2297 | (EXCCODE_RI << CAUSEB_EXCCODE)); |
e685c689 SL |
2298 | |
2299 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2300 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2301 | |
2302 | } else { | |
2303 | kvm_err("Trying to deliver RI when EXL is already set\n"); | |
2304 | er = EMULATE_FAIL; | |
2305 | } | |
2306 | ||
2307 | return er; | |
2308 | } | |
2309 | ||
31cf7498 | 2310 | enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, |
bdb7ed86 | 2311 | u32 *opc, |
d116e812 DCZ |
2312 | struct kvm_run *run, |
2313 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2314 | { |
2315 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2316 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2317 | enum emulation_result er = EMULATE_DONE; | |
2318 | ||
2319 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2320 | /* save old pc */ | |
2321 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2322 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2323 | ||
2324 | if (cause & CAUSEF_BD) | |
2325 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2326 | else | |
2327 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2328 | ||
2329 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); | |
2330 | ||
2331 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2332 | (EXCCODE_BP << CAUSEB_EXCCODE)); |
e685c689 SL |
2333 | |
2334 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2335 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2336 | |
2337 | } else { | |
6ad78a5c | 2338 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
e685c689 SL |
2339 | er = EMULATE_FAIL; |
2340 | } | |
2341 | ||
2342 | return er; | |
2343 | } | |
2344 | ||
31cf7498 | 2345 | enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, |
bdb7ed86 | 2346 | u32 *opc, |
0a560427 JH |
2347 | struct kvm_run *run, |
2348 | struct kvm_vcpu *vcpu) | |
2349 | { | |
2350 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2351 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2352 | enum emulation_result er = EMULATE_DONE; | |
2353 | ||
2354 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2355 | /* save old pc */ | |
2356 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2357 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2358 | ||
2359 | if (cause & CAUSEF_BD) | |
2360 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2361 | else | |
2362 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2363 | ||
2364 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); | |
2365 | ||
2366 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2367 | (EXCCODE_TR << CAUSEB_EXCCODE)); |
0a560427 JH |
2368 | |
2369 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2370 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
0a560427 JH |
2371 | |
2372 | } else { | |
2373 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | |
2374 | er = EMULATE_FAIL; | |
2375 | } | |
2376 | ||
2377 | return er; | |
2378 | } | |
2379 | ||
31cf7498 | 2380 | enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, |
bdb7ed86 | 2381 | u32 *opc, |
c2537ed9 JH |
2382 | struct kvm_run *run, |
2383 | struct kvm_vcpu *vcpu) | |
2384 | { | |
2385 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2386 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2387 | enum emulation_result er = EMULATE_DONE; | |
2388 | ||
2389 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2390 | /* save old pc */ | |
2391 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2392 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2393 | ||
2394 | if (cause & CAUSEF_BD) | |
2395 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2396 | else | |
2397 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2398 | ||
2399 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); | |
2400 | ||
2401 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2402 | (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); |
c2537ed9 JH |
2403 | |
2404 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2405 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
c2537ed9 JH |
2406 | |
2407 | } else { | |
2408 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | |
2409 | er = EMULATE_FAIL; | |
2410 | } | |
2411 | ||
2412 | return er; | |
2413 | } | |
2414 | ||
31cf7498 | 2415 | enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, |
bdb7ed86 | 2416 | u32 *opc, |
1c0cd66a JH |
2417 | struct kvm_run *run, |
2418 | struct kvm_vcpu *vcpu) | |
2419 | { | |
2420 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2421 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2422 | enum emulation_result er = EMULATE_DONE; | |
2423 | ||
2424 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2425 | /* save old pc */ | |
2426 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2427 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2428 | ||
2429 | if (cause & CAUSEF_BD) | |
2430 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2431 | else | |
2432 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2433 | ||
2434 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); | |
2435 | ||
2436 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2437 | (EXCCODE_FPE << CAUSEB_EXCCODE)); |
1c0cd66a JH |
2438 | |
2439 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2440 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
1c0cd66a JH |
2441 | |
2442 | } else { | |
2443 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | |
2444 | er = EMULATE_FAIL; | |
2445 | } | |
2446 | ||
2447 | return er; | |
2448 | } | |
2449 | ||
31cf7498 | 2450 | enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, |
bdb7ed86 | 2451 | u32 *opc, |
c2537ed9 JH |
2452 | struct kvm_run *run, |
2453 | struct kvm_vcpu *vcpu) | |
2454 | { | |
2455 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2456 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2457 | enum emulation_result er = EMULATE_DONE; | |
2458 | ||
2459 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2460 | /* save old pc */ | |
2461 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2462 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2463 | ||
2464 | if (cause & CAUSEF_BD) | |
2465 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2466 | else | |
2467 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2468 | ||
2469 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); | |
2470 | ||
2471 | kvm_change_c0_guest_cause(cop0, (0xff), | |
16d100db | 2472 | (EXCCODE_MSADIS << CAUSEB_EXCCODE)); |
c2537ed9 JH |
2473 | |
2474 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2475 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
c2537ed9 JH |
2476 | |
2477 | } else { | |
2478 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | |
2479 | er = EMULATE_FAIL; | |
2480 | } | |
2481 | ||
2482 | return er; | |
2483 | } | |
2484 | ||
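/*
 * Handle a reserved instruction exception from the guest. The only case
 * emulated here is RDHWR, which guest userland depends on (notably
 * RDHWR $29/ULR for thread-local storage); anything else is reflected
 * back into the guest as an RI exception.
 */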
31cf7498 | 2485 | enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, |
d116e812 DCZ |
2486 | struct kvm_run *run, |
2487 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2488 | { |
2489 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2490 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2491 | enum emulation_result er = EMULATE_DONE; | |
2492 | unsigned long curr_pc; | |
258f3a2e | 2493 | union mips_instruction inst; |
122e51d4 | 2494 | int err; |
e685c689 SL |
2495 | |
2496 | /* | |
2497 | * Update PC and hold onto current PC in case there is | |
2498 | * an error and we want to roll back the PC |
2499 | */ | |
2500 | curr_pc = vcpu->arch.pc; | |
2501 | er = update_pc(vcpu, cause); | |
2502 | if (er == EMULATE_FAIL) | |
2503 | return er; | |
2504 | ||
d116e812 | 2505 | /* Fetch the instruction. */ |
e685c689 SL |
2506 | if (cause & CAUSEF_BD) |
2507 | opc += 1; | |
6a97c775 | 2508 | err = kvm_get_badinstr(opc, vcpu, &inst.word); |
122e51d4 JH |
2509 | if (err) { |
2510 | kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); | |
e685c689 SL |
2511 | return EMULATE_FAIL; |
2512 | } | |
2513 | ||
258f3a2e | 2514 | if (inst.r_format.opcode == spec3_op && |
8eeab81c JH |
2515 | inst.r_format.func == rdhwr_op && |
2516 | inst.r_format.rs == 0 && | |
2517 | (inst.r_format.re >> 3) == 0) { | |
26f4f3b5 | 2518 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); |
258f3a2e JH |
2519 | int rd = inst.r_format.rd; |
2520 | int rt = inst.r_format.rt; | |
2521 | int sel = inst.r_format.re & 0x7; | |
6398da13 | 2522 | |
26f4f3b5 JH |
2523 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ |
2524 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { | |
2525 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", | |
2526 | rd, opc); | |
2527 | goto emulate_ri; | |
2528 | } | |
e685c689 | 2529 | switch (rd) { |
aff565aa | 2530 | case MIPS_HWR_CPUNUM: /* CPU number */ |
cf1fb0f2 | 2531 | arch->gprs[rt] = vcpu->vcpu_id; |
e685c689 | 2532 | break; |
aff565aa | 2533 | case MIPS_HWR_SYNCISTEP: /* SYNCI length */ |
e685c689 SL |
2534 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, |
2535 | current_cpu_data.icache.linesz); | |
2536 | break; | |
aff565aa | 2537 | case MIPS_HWR_CC: /* Read count register */ |
172e02d1 | 2538 | arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); |
e685c689 | 2539 | break; |
aff565aa | 2540 | case MIPS_HWR_CCRES: /* Count register resolution */ |
e685c689 SL |
2541 | switch (current_cpu_data.cputype) { |
2542 | case CPU_20KC: | |
2543 | case CPU_25KF: | |
2544 | arch->gprs[rt] = 1; | |
2545 | break; | |
2546 | default: | |
2547 | arch->gprs[rt] = 2; | |
2548 | } | |
2549 | break; | |
aff565aa | 2550 | case MIPS_HWR_ULR: /* Read UserLocal register */ |
e685c689 | 2551 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); |
e685c689 SL |
2552 | break; |
2553 | ||
2554 | default: | |
15505679 | 2555 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); |
26f4f3b5 | 2556 | goto emulate_ri; |
e685c689 | 2557 | } |
6398da13 JH |
2558 | |
2559 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), | |
2560 | vcpu->arch.gprs[rt]); | |
e685c689 | 2561 | } else { |
258f3a2e JH |
2562 | kvm_debug("Emulate RI not supported @ %p: %#x\n", |
2563 | opc, inst.word); | |
26f4f3b5 | 2564 | goto emulate_ri; |
e685c689 SL |
2565 | } |
2566 | ||
26f4f3b5 JH |
2567 | return EMULATE_DONE; |
2568 | ||
2569 | emulate_ri: | |
e685c689 | 2570 | /* |
26f4f3b5 JH |
2571 | * Rollback PC (if in branch delay slot then the PC already points to |
2572 | * branch target), and pass the RI exception to the guest OS. | |
e685c689 | 2573 | */ |
26f4f3b5 JH |
2574 | vcpu->arch.pc = curr_pc; |
2575 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | |
e685c689 SL |
2576 | } |
2577 | ||
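/*
 * Called once userspace has completed an MMIO read: the data is sign- or
 * zero-extended according to vcpu->mmio_needed (2 = signed, 1 = unsigned),
 * written to the saved destination register, and the resume PC saved by
 * kvm_mips_emulate_load() is restored.
 */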
d116e812 DCZ |
2578 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
2579 | struct kvm_run *run) | |
e685c689 SL |
2580 | { |
2581 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | |
2582 | enum emulation_result er = EMULATE_DONE; | |
e685c689 SL |
2583 | |
2584 | if (run->mmio.len > sizeof(*gpr)) { | |
6ad78a5c | 2585 | kvm_err("Bad MMIO length: %d\n", run->mmio.len); |
e685c689 SL |
2586 | er = EMULATE_FAIL; |
2587 | goto done; | |
2588 | } | |
2589 | ||
e1e575f6 JH |
2590 | /* Restore saved resume PC */ |
2591 | vcpu->arch.pc = vcpu->arch.io_pc; | |
e685c689 SL |
2592 | |
2593 | switch (run->mmio.len) { | |
59d7814a JH |
2594 | case 8: |
2595 | *gpr = *(s64 *)run->mmio.data; | |
2596 | break; | |
2597 | ||
e685c689 | 2598 | case 4: |
59d7814a JH |
2599 | if (vcpu->mmio_needed == 2) |
2600 | *gpr = *(s32 *)run->mmio.data; | |
2601 | else | |
2602 | *gpr = *(u32 *)run->mmio.data; | |
e685c689 SL |
2603 | break; |
2604 | ||
2605 | case 2: | |
2606 | if (vcpu->mmio_needed == 2) | |
8cffd197 | 2607 | *gpr = *(s16 *)run->mmio.data; |
e685c689 | 2608 | else |
8cffd197 | 2609 | *gpr = *(u16 *)run->mmio.data; |
e685c689 SL |
2610 | |
2611 | break; | |
2612 | case 1: | |
2613 | if (vcpu->mmio_needed == 2) | |
8cffd197 | 2614 | *gpr = *(s8 *)run->mmio.data; |
e685c689 SL |
2615 | else |
2616 | *gpr = *(u8 *)run->mmio.data; |
2617 | break; | |
2618 | } | |
2619 | ||
e685c689 SL |
2620 | done: |
2621 | return er; | |
2622 | } | |
2623 | ||
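/*
 * Generic guest exception delivery, used by kvm_mips_check_privilege()
 * below to reflect an arbitrary ExcCode into the guest.
 */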
31cf7498 | 2624 | static enum emulation_result kvm_mips_emulate_exc(u32 cause, |
bdb7ed86 | 2625 | u32 *opc, |
d116e812 DCZ |
2626 | struct kvm_run *run, |
2627 | struct kvm_vcpu *vcpu) | |
e685c689 | 2628 | { |
8cffd197 | 2629 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
e685c689 SL |
2630 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2631 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2632 | enum emulation_result er = EMULATE_DONE; | |
2633 | ||
2634 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2635 | /* save old pc */ | |
2636 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2637 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2638 | ||
2639 | if (cause & CAUSEF_BD) | |
2640 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2641 | else | |
2642 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2643 | ||
2644 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2645 | (exccode << CAUSEB_EXCCODE)); | |
2646 | ||
2647 | /* Set PC to the exception entry point */ | |
7801bbe1 | 2648 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; |
e685c689 SL |
2649 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2650 | ||
2651 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | |
2652 | exccode, kvm_read_c0_guest_epc(cop0), | |
2653 | kvm_read_c0_guest_badvaddr(cop0)); | |
2654 | } else { | |
6ad78a5c | 2655 | kvm_err("Trying to deliver EXC when EXL is already set\n"); |
e685c689 SL |
2656 | er = EMULATE_FAIL; |
2657 | } | |
2658 | ||
2659 | return er; | |
2660 | } | |
2661 | ||
31cf7498 | 2662 | enum emulation_result kvm_mips_check_privilege(u32 cause, |
bdb7ed86 | 2663 | u32 *opc, |
d116e812 DCZ |
2664 | struct kvm_run *run, |
2665 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2666 | { |
2667 | enum emulation_result er = EMULATE_DONE; | |
8cffd197 | 2668 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
e685c689 SL |
2669 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
2670 | ||
2671 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | |
2672 | ||
2673 | if (usermode) { | |
2674 | switch (exccode) { | |
16d100db JH |
2675 | case EXCCODE_INT: |
2676 | case EXCCODE_SYS: | |
2677 | case EXCCODE_BP: | |
2678 | case EXCCODE_RI: | |
2679 | case EXCCODE_TR: | |
2680 | case EXCCODE_MSAFPE: | |
2681 | case EXCCODE_FPE: | |
2682 | case EXCCODE_MSADIS: | |
e685c689 SL |
2683 | break; |
2684 | ||
16d100db | 2685 | case EXCCODE_CPU: |
e685c689 SL |
2686 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) |
2687 | er = EMULATE_PRIV_FAIL; | |
2688 | break; | |
2689 | ||
16d100db | 2690 | case EXCCODE_MOD: |
e685c689 SL |
2691 | break; |
2692 | ||
16d100db | 2693 | case EXCCODE_TLBL: |
d116e812 DCZ |
2694 | /* |
2695 | * If we are accessing guest kernel space, send an |
2696 | * address error exception to the guest | |
2697 | */ | |
e685c689 | 2698 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
6ad78a5c DCZ |
2699 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, |
2700 | badvaddr); | |
e685c689 | 2701 | cause &= ~0xff; |
16d100db | 2702 | cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); |
e685c689 SL |
2703 | er = EMULATE_PRIV_FAIL; |
2704 | } | |
2705 | break; | |
2706 | ||
16d100db | 2707 | case EXCCODE_TLBS: |
d116e812 DCZ |
2708 | /* |
2709 | * If we are accessing guest kernel space, send an |
2710 | * address error exception to the guest | |
2711 | */ | |
e685c689 | 2712 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
6ad78a5c DCZ |
2713 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, |
2714 | badvaddr); | |
e685c689 | 2715 | cause &= ~0xff; |
16d100db | 2716 | cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); |
e685c689 SL |
2717 | er = EMULATE_PRIV_FAIL; |
2718 | } | |
2719 | break; | |
2720 | ||
16d100db | 2721 | case EXCCODE_ADES: |
6ad78a5c DCZ |
2722 | kvm_debug("%s: address error ST @ %#lx\n", __func__, |
2723 | badvaddr); | |
e685c689 SL |
2724 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2725 | cause &= ~0xff; | |
16d100db | 2726 | cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); |
e685c689 SL |
2727 | } |
2728 | er = EMULATE_PRIV_FAIL; | |
2729 | break; | |
16d100db | 2730 | case EXCCODE_ADEL: |
6ad78a5c DCZ |
2731 | kvm_debug("%s: address error LD @ %#lx\n", __func__, |
2732 | badvaddr); | |
e685c689 SL |
2733 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2734 | cause &= ~0xff; | |
16d100db | 2735 | cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); |
e685c689 SL |
2736 | } |
2737 | er = EMULATE_PRIV_FAIL; | |
2738 | break; | |
2739 | default: | |
2740 | er = EMULATE_PRIV_FAIL; | |
2741 | break; | |
2742 | } | |
2743 | } | |
2744 | ||
d116e812 | 2745 | if (er == EMULATE_PRIV_FAIL) |
e685c689 | 2746 | kvm_mips_emulate_exc(cause, opc, run, vcpu); |
d116e812 | 2747 | |
e685c689 SL |
2748 | return er; |
2749 | } | |
2750 | ||
d116e812 DCZ |
2751 | /* |
2752 | * User Address (UA) fault. This could happen if: |
e685c689 SL |
2753 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this |
2754 | * case we pass on the fault to the guest kernel and let it handle it. | |
2755 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | |
2756 | * case we inject the TLB from the Guest TLB into the shadow host TLB | |
2757 | */ | |
31cf7498 | 2758 | enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, |
bdb7ed86 | 2759 | u32 *opc, |
d116e812 | 2760 | struct kvm_run *run, |
577ed7f7 JH |
2761 | struct kvm_vcpu *vcpu, |
2762 | bool write_fault) | |
e685c689 SL |
2763 | { |
2764 | enum emulation_result er = EMULATE_DONE; | |
8cffd197 | 2765 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
e685c689 SL |
2766 | unsigned long va = vcpu->arch.host_cp0_badvaddr; |
2767 | int index; | |
2768 | ||
e4e94c0f JH |
2769 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", |
2770 | vcpu->arch.host_cp0_badvaddr); | |
e685c689 | 2771 | |
d116e812 DCZ |
2772 | /* |
2773 | * KVM would not have got the exception if this entry was valid in the | |
2774 | * shadow host TLB. Check the Guest TLB; if the entry is not there, |
2775 | * send the guest an exception. The guest exc handler should then inject | |
2776 | * an entry into the guest TLB. | |
e685c689 SL |
2777 | */ |
2778 | index = kvm_mips_guest_tlb_lookup(vcpu, | |
caa1faa7 | 2779 | (va & VPN2_MASK) | |
ca64c2be PB |
2780 | (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & |
2781 | KVM_ENTRYHI_ASID)); | |
e685c689 | 2782 | if (index < 0) { |
16d100db | 2783 | if (exccode == EXCCODE_TLBL) { |
e685c689 | 2784 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); |
16d100db | 2785 | } else if (exccode == EXCCODE_TLBS) { |
e685c689 SL |
2786 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); |
2787 | } else { | |
6ad78a5c DCZ |
2788 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2789 | exccode); | |
e685c689 SL |
2790 | er = EMULATE_FAIL; |
2791 | } | |
2792 | } else { | |
2793 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | |
2794 | ||
d116e812 DCZ |
2795 | /* |
2796 | * Check if the entry is valid, if not then setup a TLB invalid | |
2797 | * exception to the guest | |
2798 | */ | |
e685c689 | 2799 | if (!TLB_IS_VALID(*tlb, va)) { |
16d100db | 2800 | if (exccode == EXCCODE_TLBL) { |
e685c689 SL |
2801 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, |
2802 | vcpu); | |
16d100db | 2803 | } else if (exccode == EXCCODE_TLBS) { |
e685c689 SL |
2804 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, |
2805 | vcpu); | |
2806 | } else { | |
6ad78a5c DCZ |
2807 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2808 | exccode); | |
e685c689 SL |
2809 | er = EMULATE_FAIL; |
2810 | } | |
2811 | } else { | |
d116e812 | 2812 | kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", |
9fbfb06a | 2813 | tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); |
d116e812 DCZ |
2814 | /* |
2815 | * OK we have a Guest TLB entry, now inject it into the | |
2816 | * shadow host TLB | |
2817 | */ | |
577ed7f7 JH |
2818 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, |
2819 | write_fault)) { | |
9b731bcf JH |
2820 | kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", |
2821 | __func__, va, index, vcpu, | |
2822 | read_c0_entryhi()); | |
2823 | er = EMULATE_FAIL; | |
2824 | } | |
e685c689 SL |
2825 | } |
2826 | } | |
2827 | ||
2828 | return er; | |
2829 | } |