Commit | Line | Data |
---|---|---|
e685c689 SL |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * KVM/MIPS: Instruction/Exception emulation | |
7 | * | |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | |
10 | */ | |
11 | ||
12 | #include <linux/errno.h> | |
13 | #include <linux/err.h> | |
e30492bb | 14 | #include <linux/ktime.h> |
e685c689 SL |
15 | #include <linux/kvm_host.h> |
16 | #include <linux/module.h> | |
17 | #include <linux/vmalloc.h> | |
18 | #include <linux/fs.h> | |
19 | #include <linux/bootmem.h> | |
20 | #include <linux/random.h> | |
21 | #include <asm/page.h> | |
22 | #include <asm/cacheflush.h> | |
23 | #include <asm/cpu-info.h> | |
24 | #include <asm/mmu_context.h> | |
25 | #include <asm/tlbflush.h> | |
26 | #include <asm/inst.h> | |
27 | ||
28 | #undef CONFIG_MIPS_MT | |
29 | #include <asm/r4kcache.h> | |
30 | #define CONFIG_MIPS_MT | |
31 | ||
32 | #include "kvm_mips_opcode.h" | |
33 | #include "kvm_mips_int.h" | |
34 | #include "kvm_mips_comm.h" | |
35 | ||
36 | #include "trace.h" | |
37 | ||
38 | /* | |
39 | * Compute the return address and emulate the branch, if required. | |
40 | * This function should only be called when the guest is in a branch delay slot. | |
41 | */ | |
42 | unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |
43 | unsigned long instpc) | |
44 | { | |
45 | unsigned int dspcontrol; | |
46 | union mips_instruction insn; | |
47 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
48 | long epc = instpc; | |
49 | long nextpc = KVM_INVALID_INST; | |
50 | ||
51 | if (epc & 3) | |
52 | goto unaligned; | |
53 | ||
54 | /* | |
55 | * Read the instruction | |
56 | */ | |
57 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); | |
58 | ||
59 | if (insn.word == KVM_INVALID_INST) | |
60 | return KVM_INVALID_INST; | |
61 | ||
62 | switch (insn.i_format.opcode) { | |
63 | /* | |
64 | * jr and jalr are in r_format format. | |
65 | */ | |
66 | case spec_op: | |
67 | switch (insn.r_format.func) { | |
68 | case jalr_op: | |
69 | arch->gprs[insn.r_format.rd] = epc + 8; | |
70 | /* Fall through */ | |
71 | case jr_op: | |
72 | nextpc = arch->gprs[insn.r_format.rs]; | |
73 | break; | |
74 | } | |
75 | break; | |
76 | ||
77 | /* | |
78 | * This group contains: | |
79 | * bltz_op, bgez_op, bltzl_op, bgezl_op, | |
80 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. | |
81 | */ | |
82 | case bcond_op: | |
83 | switch (insn.i_format.rt) { | |
84 | case bltz_op: | |
85 | case bltzl_op: | |
86 | if ((long)arch->gprs[insn.i_format.rs] < 0) | |
87 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
88 | else | |
89 | epc += 8; | |
90 | nextpc = epc; | |
91 | break; | |
92 | ||
93 | case bgez_op: | |
94 | case bgezl_op: | |
95 | if ((long)arch->gprs[insn.i_format.rs] >= 0) | |
96 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
97 | else | |
98 | epc += 8; | |
99 | nextpc = epc; | |
100 | break; | |
101 | ||
102 | case bltzal_op: | |
103 | case bltzall_op: | |
104 | arch->gprs[31] = epc + 8; | |
105 | if ((long)arch->gprs[insn.i_format.rs] < 0) | |
106 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
107 | else | |
108 | epc += 8; | |
109 | nextpc = epc; | |
110 | break; | |
111 | ||
112 | case bgezal_op: | |
113 | case bgezall_op: | |
114 | arch->gprs[31] = epc + 8; | |
115 | if ((long)arch->gprs[insn.i_format.rs] >= 0) | |
116 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
117 | else | |
118 | epc += 8; | |
119 | nextpc = epc; | |
120 | break; | |
121 | case bposge32_op: | |
122 | if (!cpu_has_dsp) | |
123 | goto sigill; | |
124 | ||
125 | dspcontrol = rddsp(0x01); | |
126 | ||
127 | if (dspcontrol >= 32) { | |
128 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
129 | } else | |
130 | epc += 8; | |
131 | nextpc = epc; | |
132 | break; | |
133 | } | |
134 | break; | |
135 | ||
136 | /* | |
137 | * These are unconditional and in j_format. | |
138 | */ | |
139 | case jal_op: | |
140 | arch->gprs[31] = instpc + 8; /* Fall through */ | |
141 | case j_op: | |
142 | epc += 4; | |
143 | epc >>= 28; | |
144 | epc <<= 28; | |
145 | epc |= (insn.j_format.target << 2); | |
146 | nextpc = epc; | |
147 | break; | |
148 | ||
149 | /* | |
150 | * These are conditional and in i_format. | |
151 | */ | |
152 | case beq_op: | |
153 | case beql_op: | |
154 | if (arch->gprs[insn.i_format.rs] == | |
155 | arch->gprs[insn.i_format.rt]) | |
156 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
157 | else | |
158 | epc += 8; | |
159 | nextpc = epc; | |
160 | break; | |
161 | ||
162 | case bne_op: | |
163 | case bnel_op: | |
164 | if (arch->gprs[insn.i_format.rs] != | |
165 | arch->gprs[insn.i_format.rt]) | |
166 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
167 | else | |
168 | epc += 8; | |
169 | nextpc = epc; | |
170 | break; | |
171 | ||
172 | case blez_op: /* not really i_format */ | |
173 | case blezl_op: | |
174 | /* rt field assumed to be zero */ | |
175 | if ((long)arch->gprs[insn.i_format.rs] <= 0) | |
176 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
177 | else | |
178 | epc += 8; | |
179 | nextpc = epc; | |
180 | break; | |
181 | ||
182 | case bgtz_op: | |
183 | case bgtzl_op: | |
184 | /* rt field assumed to be zero */ | |
185 | if ((long)arch->gprs[insn.i_format.rs] > 0) | |
186 | epc = epc + 4 + (insn.i_format.simmediate << 2); | |
187 | else | |
188 | epc += 8; | |
189 | nextpc = epc; | |
190 | break; | |
191 | ||
192 | /* | |
193 | * And now the FPA/cp1 branch instructions. | |
194 | */ | |
195 | case cop1_op: | |
196 | printk("%s: unsupported cop1_op\n", __func__); | |
197 | break; | |
198 | } | |
199 | ||
200 | return nextpc; | |
201 | ||
202 | unaligned: | |
203 | printk("%s: unaligned epc\n", __func__); | |
204 | return nextpc; | |
205 | ||
206 | sigill: | |
207 | printk("%s: DSP branch but not DSP ASE\n", __func__); | |
208 | return nextpc; | |
209 | } | |
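/*
 * Worked example (illustrative, not part of the original source): if the
 * guest faults in the delay slot of "beq v0, v1, +0x20" at epc 0x80001000,
 * the code above returns 0x80001024 (epc + 4 + (8 << 2)) when v0 == v1,
 * and 0x80001008 (epc + 8, skipping branch and delay slot) when not.
 * The hypothetical helper below restates the arithmetic in isolation.
 */
static inline unsigned long branch_target_sketch(unsigned long epc,
						 short simmediate, int taken)
{
	/* Taken: the target is relative to the delay slot at epc + 4. */
	if (taken)
		return epc + 4 + ((long)simmediate << 2);
	/* Not taken: resume past the delay slot. */
	return epc + 8;
}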
210 | ||
211 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | |
212 | { | |
213 | unsigned long branch_pc; | |
214 | enum emulation_result er = EMULATE_DONE; | |
215 | ||
216 | if (cause & CAUSEF_BD) { | |
217 | branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); | |
218 | if (branch_pc == KVM_INVALID_INST) { | |
219 | er = EMULATE_FAIL; | |
220 | } else { | |
221 | vcpu->arch.pc = branch_pc; | |
222 | kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); | |
223 | } | |
224 | } else | |
225 | vcpu->arch.pc += 4; | |
226 | ||
227 | kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); | |
228 | ||
229 | return er; | |
230 | } | |
231 | ||
e30492bb JH |
232 | /** |
233 | * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. | |
234 | * @vcpu: Virtual CPU. | |
e685c689 | 235 | * |
f8239342 JH |
236 | * Returns: 1 if the CP0_Count timer is disabled by either the guest |
237 | * CP0_Cause.DC bit or the count_ctl.DC bit. | |
e30492bb | 238 | * 0 otherwise (in which case CP0_Count timer is running). |
e685c689 | 239 | */ |
e30492bb | 240 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) |
e685c689 SL |
241 | { |
242 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
f8239342 JH |
243 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || |
244 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); | |
e30492bb | 245 | } |
e685c689 | 246 | |
e30492bb JH |
247 | /** |
248 | * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. | |
249 | * | |
250 | * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. | |
251 | * | |
252 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | |
253 | */ | |
254 | static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) | |
255 | { | |
256 | s64 now_ns, periods; | |
257 | u64 delta; | |
258 | ||
259 | now_ns = ktime_to_ns(now); | |
260 | delta = now_ns + vcpu->arch.count_dyn_bias; | |
261 | ||
262 | if (delta >= vcpu->arch.count_period) { | |
263 | /* If delta is out of safe range the bias needs adjusting */ | |
264 | periods = div64_s64(now_ns, vcpu->arch.count_period); | |
265 | vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; | |
266 | /* Recalculate delta with new bias */ | |
267 | delta = now_ns + vcpu->arch.count_dyn_bias; | |
e685c689 SL |
268 | } |
269 | ||
e30492bb JH |
270 | /* |
271 | * We've ensured that: | |
272 | * delta < count_period | |
273 | * | |
274 | * Therefore the intermediate delta*count_hz will never overflow since | |
275 | * at the boundary condition: | |
276 | * delta = count_period | |
277 | * delta = NSEC_PER_SEC * 2^32 / count_hz | |
278 | * delta * count_hz = NSEC_PER_SEC * 2^32 | |
279 | */ | |
280 | return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); | |
281 | } | |
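/*
 * Worked numbers (illustrative): with the default count_hz of 100 MHz,
 * count_period = 10^9 ns * 2^32 / 10^8 = 42949672960 ns, so the scaled
 * count wraps roughly every 42.9 seconds. At the boundary condition
 * delta == count_period, the dividend above is delta * count_hz =
 * NSEC_PER_SEC * 2^32 ~= 4.3e18, comfortably below the u64 limit of
 * ~1.8e19, which is why div_u64() never sees an overflowed product.
 */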
282 | ||
f8239342 JH |
283 | /** |
284 | * kvm_mips_count_time() - Get effective current time. | |
285 | * @vcpu: Virtual CPU. | |
286 | * | |
287 | * Get effective monotonic ktime. This is usually a straightforward ktime_get(), | |
288 | * except when the master disable bit is set in count_ctl, in which case it is | |
289 | * count_resume, i.e. the time that the count was disabled. | |
290 | * | |
291 | * Returns: Effective monotonic ktime for CP0_Count. | |
292 | */ | |
293 | static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) | |
294 | { | |
295 | if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) | |
296 | return vcpu->arch.count_resume; | |
297 | ||
298 | return ktime_get(); | |
299 | } | |
300 | ||
e30492bb JH |
301 | /** |
302 | * kvm_mips_read_count_running() - Read the current count value as if running. | |
303 | * @vcpu: Virtual CPU. | |
304 | * @now: Kernel time to read CP0_Count at. | |
305 | * | |
306 | * Returns the current guest CP0_Count register at time @now and handles the | |
307 | * case where the timer interrupt is pending but hasn't yet been handled. | |
308 | * | |
309 | * Returns: The current value of the guest CP0_Count register. | |
310 | */ | |
311 | static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) | |
312 | { | |
313 | ktime_t expires; | |
314 | int running; | |
315 | ||
316 | /* Is the hrtimer pending? */ | |
317 | expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); | |
318 | if (ktime_compare(now, expires) >= 0) { | |
319 | /* | |
320 | * Cancel it while we handle it so there's no chance of | |
321 | * interference with the timeout handler. | |
322 | */ | |
323 | running = hrtimer_cancel(&vcpu->arch.comparecount_timer); | |
324 | ||
325 | /* Nothing should be waiting on the timeout */ | |
326 | kvm_mips_callbacks->queue_timer_int(vcpu); | |
327 | ||
328 | /* | |
329 | * Restart the timer if it was running based on the expiry time | |
330 | * we read, so that we don't push it back 2 periods. | |
331 | */ | |
332 | if (running) { | |
333 | expires = ktime_add_ns(expires, | |
334 | vcpu->arch.count_period); | |
335 | hrtimer_start(&vcpu->arch.comparecount_timer, expires, | |
336 | HRTIMER_MODE_ABS); | |
337 | } | |
338 | } | |
339 | ||
340 | /* Return the biased and scaled guest CP0_Count */ | |
341 | return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); | |
342 | } | |
343 | ||
344 | /** | |
345 | * kvm_mips_read_count() - Read the current count value. | |
346 | * @vcpu: Virtual CPU. | |
347 | * | |
348 | * Read the current guest CP0_Count value, taking into account whether the timer | |
349 | * is stopped. | |
350 | * | |
351 | * Returns: The current guest CP0_Count value. | |
352 | */ | |
353 | uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu) | |
354 | { | |
355 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
356 | ||
357 | /* If count disabled just read static copy of count */ | |
358 | if (kvm_mips_count_disabled(vcpu)) | |
359 | return kvm_read_c0_guest_count(cop0); | |
360 | ||
361 | return kvm_mips_read_count_running(vcpu, ktime_get()); | |
362 | } | |
363 | ||
364 | /** | |
365 | * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. | |
366 | * @vcpu: Virtual CPU. | |
367 | * @count: Output pointer for CP0_Count value at point of freeze. | |
368 | * | |
369 | * Freeze the hrtimer safely and return both the ktime and the CP0_Count value | |
370 | * at the point it was frozen. It is guaranteed that any pending interrupts at | |
371 | * the point it was frozen are handled, and none after that point. | |
372 | * | |
373 | * This is useful where the time/CP0_Count is needed in the calculation of the | |
374 | * new parameters. | |
375 | * | |
376 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | |
377 | * | |
378 | * Returns: The ktime at the point of freeze. | |
379 | */ | |
380 | static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, | |
381 | uint32_t *count) | |
382 | { | |
383 | ktime_t now; | |
384 | ||
385 | /* stop hrtimer before finding time */ | |
386 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | |
387 | now = ktime_get(); | |
388 | ||
389 | /* find count at this point and handle pending hrtimer */ | |
390 | *count = kvm_mips_read_count_running(vcpu, now); | |
391 | ||
392 | return now; | |
393 | } | |
394 | ||
395 | ||
396 | /** | |
397 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. | |
398 | * @vcpu: Virtual CPU. | |
399 | * @now: ktime at point of resume. | |
400 | * @count: CP0_Count at point of resume. | |
401 | * | |
402 | * Resumes the timer and updates the timer expiry based on @now and @count. | |
403 | * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer | |
404 | * parameters need to be changed. | |
405 | * | |
406 | * It is guaranteed that a timer interrupt immediately after resume will be | |
407 | * handled, but not if CP0_Compare is exactly at @count. That case is already | |
408 | * handled by kvm_mips_freeze_hrtimer(). | |
409 | * | |
410 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | |
411 | */ | |
412 | static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, | |
413 | ktime_t now, uint32_t count) | |
414 | { | |
415 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
416 | uint32_t compare; | |
417 | u64 delta; | |
418 | ktime_t expire; | |
419 | ||
420 | /* Calculate timeout (wrap 0 to 2^32) */ | |
421 | compare = kvm_read_c0_guest_compare(cop0); | |
422 | delta = (u64)(uint32_t)(compare - count - 1) + 1; | |
423 | delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); | |
424 | expire = ktime_add_ns(now, delta); | |
425 | ||
426 | /* Update hrtimer to use new timeout */ | |
427 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | |
428 | hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); | |
429 | } | |
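/*
 * Wrap arithmetic example (illustrative): with count = 0xfffffff0 and
 * compare = 0x00000010, (u64)(uint32_t)(compare - count - 1) + 1 = 0x20,
 * i.e. 32 ticks until the next match. With compare == count the same
 * expression yields 2^32: the next match is a full CP0_Count wrap away
 * rather than an immediate expiry.
 */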
430 | ||
431 | /** | |
432 | * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. | |
433 | * @vcpu: Virtual CPU. | |
434 | * | |
435 | * Recalculates and updates the expiry time of the hrtimer. This can be used | |
436 | * after altering timer parameters which do not depend on the time at which | |
437 | * the change occurs (in those cases kvm_mips_freeze_hrtimer() and | |
438 | * kvm_mips_resume_hrtimer() are used directly). | |
439 | * | |
440 | * It is guaranteed that no timer interrupts will be lost in the process. | |
441 | * | |
442 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | |
443 | */ | |
444 | static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) | |
445 | { | |
446 | ktime_t now; | |
447 | uint32_t count; | |
448 | ||
449 | /* | |
450 | * freeze_hrtimer takes care of timer interrupts <= count, and | |
451 | * resume_hrtimer takes care of timer interrupts > count. | |
452 | */ | |
453 | now = kvm_mips_freeze_hrtimer(vcpu, &count); | |
454 | kvm_mips_resume_hrtimer(vcpu, now, count); | |
455 | } | |
456 | ||
457 | /** | |
458 | * kvm_mips_write_count() - Modify the count and update timer. | |
459 | * @vcpu: Virtual CPU. | |
460 | * @count: Guest CP0_Count value to set. | |
461 | * | |
462 | * Sets the CP0_Count value and updates the timer accordingly. | |
463 | */ | |
464 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count) | |
465 | { | |
466 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
467 | ktime_t now; | |
468 | ||
469 | /* Calculate bias */ | |
f8239342 | 470 | now = kvm_mips_count_time(vcpu); |
e30492bb JH |
471 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); |
472 | ||
473 | if (kvm_mips_count_disabled(vcpu)) | |
474 | /* The timer's disabled, adjust the static count */ | |
475 | kvm_write_c0_guest_count(cop0, count); | |
476 | else | |
477 | /* Update timeout */ | |
478 | kvm_mips_resume_hrtimer(vcpu, now, count); | |
479 | } | |
480 | ||
481 | /** | |
482 | * kvm_mips_init_count() - Initialise timer. | |
483 | * @vcpu: Virtual CPU. | |
484 | * | |
485 | * Initialise the timer to a sensible frequency, namely 100 MHz, zero it, and set | |
486 | * it going if it's enabled. | |
487 | */ | |
488 | void kvm_mips_init_count(struct kvm_vcpu *vcpu) | |
489 | { | |
490 | /* 100 MHz */ | |
491 | vcpu->arch.count_hz = 100*1000*1000; | |
492 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, | |
493 | vcpu->arch.count_hz); | |
494 | vcpu->arch.count_dyn_bias = 0; | |
495 | ||
496 | /* Starting at 0 */ | |
497 | kvm_mips_write_count(vcpu, 0); | |
498 | } | |
499 | ||
f74a8e22 JH |
500 | /** |
501 | * kvm_mips_set_count_hz() - Update the frequency of the timer. | |
502 | * @vcpu: Virtual CPU. | |
503 | * @count_hz: Frequency of CP0_Count timer in Hz. | |
504 | * | |
505 | * Change the frequency of the CP0_Count timer. This is done atomically so that | |
506 | * CP0_Count is continuous and no timer interrupt is lost. | |
507 | * | |
508 | * Returns: -EINVAL if @count_hz is out of range. | |
509 | * 0 on success. | |
510 | */ | |
511 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) | |
512 | { | |
513 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
514 | int dc; | |
515 | ktime_t now; | |
516 | u32 count; | |
517 | ||
518 | /* ensure the frequency is in a sensible range... */ | |
519 | if (count_hz <= 0 || count_hz > NSEC_PER_SEC) | |
520 | return -EINVAL; | |
521 | /* ... and has actually changed */ | |
522 | if (vcpu->arch.count_hz == count_hz) | |
523 | return 0; | |
524 | ||
525 | /* Safely freeze timer so we can keep it continuous */ | |
526 | dc = kvm_mips_count_disabled(vcpu); | |
527 | if (dc) { | |
528 | now = kvm_mips_count_time(vcpu); | |
529 | count = kvm_read_c0_guest_count(cop0); | |
530 | } else { | |
531 | now = kvm_mips_freeze_hrtimer(vcpu, &count); | |
532 | } | |
533 | ||
534 | /* Update the frequency */ | |
535 | vcpu->arch.count_hz = count_hz; | |
536 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); | |
537 | vcpu->arch.count_dyn_bias = 0; | |
538 | ||
539 | /* Calculate adjusted bias so dynamic count is unchanged */ | |
540 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); | |
541 | ||
542 | /* Update and resume hrtimer */ | |
543 | if (!dc) | |
544 | kvm_mips_resume_hrtimer(vcpu, now, count); | |
545 | return 0; | |
546 | } | |
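/*
 * Usage sketch (hypothetical values): assuming userspace drives this via
 * the KVM one-reg interface, retuning the default 100 MHz guest timer to
 * 200 MHz would funnel through here, and CP0_Count stays continuous
 * because count_bias is recomputed against the frozen time:
 *
 *	ret = kvm_mips_set_count_hz(vcpu, 200 * 1000 * 1000);
 */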
547 | ||
e30492bb JH |
548 | /** |
549 | * kvm_mips_write_compare() - Modify compare and update timer. | |
550 | * @vcpu: Virtual CPU. | |
551 | * @compare: New CP0_Compare value. | |
552 | * | |
553 | * Update CP0_Compare to a new value and update the timeout. | |
554 | */ | |
555 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) | |
556 | { | |
557 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
558 | ||
559 | /* if unchanged, must just be an ack */ | |
560 | if (kvm_read_c0_guest_compare(cop0) == compare) | |
561 | return; | |
562 | ||
563 | /* Update compare */ | |
564 | kvm_write_c0_guest_compare(cop0, compare); | |
565 | ||
566 | /* Update timeout if count enabled */ | |
567 | if (!kvm_mips_count_disabled(vcpu)) | |
568 | kvm_mips_update_hrtimer(vcpu); | |
569 | } | |
570 | ||
571 | /** | |
572 | * kvm_mips_count_disable() - Disable count. | |
573 | * @vcpu: Virtual CPU. | |
574 | * | |
575 | * Disable the CP0_Count timer. A timer interrupt on or before the final stop | |
576 | * time will be handled but not after. | |
577 | * | |
f8239342 JH |
578 | * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or |
579 | * count_ctl.DC has been set (count disabled). | |
e30492bb JH |
580 | * |
581 | * Returns: The time that the timer was stopped. | |
582 | */ | |
583 | static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) | |
584 | { | |
585 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
586 | uint32_t count; | |
587 | ktime_t now; | |
588 | ||
589 | /* Stop hrtimer */ | |
590 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | |
591 | ||
592 | /* Set the static count from the dynamic count, handling pending TI */ | |
593 | now = ktime_get(); | |
594 | count = kvm_mips_read_count_running(vcpu, now); | |
595 | kvm_write_c0_guest_count(cop0, count); | |
596 | ||
597 | return now; | |
598 | } | |
599 | ||
600 | /** | |
601 | * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. | |
602 | * @vcpu: Virtual CPU. | |
603 | * | |
604 | * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or | |
f8239342 JH |
605 | * before the final stop time will be handled if the timer isn't disabled by |
606 | * count_ctl.DC, but not after. | |
e30492bb JH |
607 | * |
608 | * Assumes CP0_Cause.DC is clear (count enabled). | |
609 | */ | |
610 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) | |
611 | { | |
612 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
613 | ||
614 | kvm_set_c0_guest_cause(cop0, CAUSEF_DC); | |
f8239342 JH |
615 | if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) |
616 | kvm_mips_count_disable(vcpu); | |
e30492bb JH |
617 | } |
618 | ||
619 | /** | |
620 | * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. | |
621 | * @vcpu: Virtual CPU. | |
622 | * | |
623 | * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after | |
f8239342 JH |
624 | * the start time will be handled if the timer isn't disabled by count_ctl.DC, |
625 | * potentially before even returning, so the caller should be careful with | |
626 | * ordering of CP0_Cause modifications so as not to lose it. | |
e30492bb JH |
627 | * |
628 | * Assumes CP0_Cause.DC is set (count disabled). | |
629 | */ | |
630 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) | |
631 | { | |
632 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
633 | uint32_t count; | |
634 | ||
635 | kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); | |
636 | ||
637 | /* | |
638 | * Set the dynamic count to match the static count. | |
f8239342 JH |
639 | * This starts the hrtimer if count_ctl.DC allows it. |
640 | * Otherwise it conveniently updates the biases. | |
e30492bb JH |
641 | */ |
642 | count = kvm_read_c0_guest_count(cop0); | |
643 | kvm_mips_write_count(vcpu, count); | |
644 | } | |
645 | ||
f8239342 JH |
646 | /** |
647 | * kvm_mips_set_count_ctl() - Update the count control KVM register. | |
648 | * @vcpu: Virtual CPU. | |
649 | * @count_ctl: Count control register new value. | |
650 | * | |
651 | * Set the count control KVM register. The timer is updated accordingly. | |
652 | * | |
653 | * Returns: -EINVAL if reserved bits are set. | |
654 | * 0 on success. | |
655 | */ | |
656 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) | |
657 | { | |
658 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
659 | s64 changed = count_ctl ^ vcpu->arch.count_ctl; | |
660 | s64 delta; | |
661 | ktime_t expire, now; | |
662 | uint32_t count, compare; | |
663 | ||
664 | /* Only allow defined bits to be changed */ | |
665 | if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) | |
666 | return -EINVAL; | |
667 | ||
668 | /* Apply new value */ | |
669 | vcpu->arch.count_ctl = count_ctl; | |
670 | ||
671 | /* Master CP0_Count disable */ | |
672 | if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { | |
673 | /* Is CP0_Cause.DC already disabling CP0_Count? */ | |
674 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { | |
675 | if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) | |
676 | /* Just record the current time */ | |
677 | vcpu->arch.count_resume = ktime_get(); | |
678 | } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { | |
679 | /* disable timer and record current time */ | |
680 | vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); | |
681 | } else { | |
682 | /* | |
683 | * Calculate timeout relative to static count at resume | |
684 | * time (wrap 0 to 2^32). | |
685 | */ | |
686 | count = kvm_read_c0_guest_count(cop0); | |
687 | compare = kvm_read_c0_guest_compare(cop0); | |
688 | delta = (u64)(uint32_t)(compare - count - 1) + 1; | |
689 | delta = div_u64(delta * NSEC_PER_SEC, | |
690 | vcpu->arch.count_hz); | |
691 | expire = ktime_add_ns(vcpu->arch.count_resume, delta); | |
692 | ||
693 | /* Handle pending interrupt */ | |
694 | now = ktime_get(); | |
695 | if (ktime_compare(now, expire) >= 0) | |
696 | /* Nothing should be waiting on the timeout */ | |
697 | kvm_mips_callbacks->queue_timer_int(vcpu); | |
698 | ||
699 | /* Resume hrtimer without changing bias */ | |
700 | count = kvm_mips_read_count_running(vcpu, now); | |
701 | kvm_mips_resume_hrtimer(vcpu, now, count); | |
702 | } | |
703 | } | |
704 | ||
705 | return 0; | |
706 | } | |
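/*
 * Decision summary for the DC handling above (illustrative), where CTL.DC
 * is count_ctl.DC after the write and Cause.DC is the guest CP0_Cause bit:
 *
 *	Cause.DC  CTL.DC  action
 *	   1	    1	  record current time in count_resume only
 *	   1	    0	  nothing yet; count stays stopped by Cause.DC
 *	   0	    1	  stop the hrtimer, record the stop time
 *	   0	    0	  replay time since count_resume, queue any missed
 *			  timer interrupt, restart the hrtimer
 */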
707 | ||
708 | /** | |
709 | * kvm_mips_set_count_resume() - Update the count resume KVM register. | |
710 | * @vcpu: Virtual CPU. | |
711 | * @count_resume: Count resume register new value. | |
712 | * | |
713 | * Set the count resume KVM register. | |
714 | * | |
715 | * Returns: -EINVAL if out of valid range (0..now). | |
716 | * 0 on success. | |
717 | */ | |
718 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) | |
719 | { | |
720 | /* | |
721 | * It doesn't make sense for the resume time to be in the future, as it | |
722 | * would be possible for the next interrupt to be more than a full | |
723 | * period in the future. | |
724 | */ | |
725 | if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get())) | |
726 | return -EINVAL; | |
727 | ||
728 | vcpu->arch.count_resume = ns_to_ktime(count_resume); | |
729 | return 0; | |
730 | } | |
731 | ||
e30492bb JH |
732 | /** |
733 | * kvm_mips_count_timeout() - Push timer forward on timeout. | |
734 | * @vcpu: Virtual CPU. | |
735 | * | |
736 | * Handle an hrtimer event by pushing the hrtimer forward one period. | |
737 | * | |
738 | * Returns: The hrtimer_restart value to return to the hrtimer subsystem. | |
739 | */ | |
740 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) | |
741 | { | |
742 | /* Add the Count period to the current expiry time */ | |
743 | hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, | |
744 | vcpu->arch.count_period); | |
745 | return HRTIMER_RESTART; | |
e685c689 SL |
746 | } |
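/*
 * Context note (assumed wiring, from the companion kvm_mips.c of this era):
 * comparecount_timer is initialised at vcpu setup with a callback that
 * queues the guest timer interrupt, wakes a vcpu blocked in WAIT, and
 * returns through kvm_mips_count_timeout() above so the expiry is pushed
 * forward one count_period per firing.
 */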
747 | ||
748 | enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |
749 | { | |
750 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
751 | enum emulation_result er = EMULATE_DONE; | |
752 | ||
753 | if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { | |
754 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, | |
755 | kvm_read_c0_guest_epc(cop0)); | |
756 | kvm_clear_c0_guest_status(cop0, ST0_EXL); | |
757 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); | |
758 | ||
759 | } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { | |
760 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | |
761 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | |
762 | } else { | |
763 | printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | |
764 | vcpu->arch.pc); | |
765 | er = EMULATE_FAIL; | |
766 | } | |
767 | ||
768 | return er; | |
769 | } | |
770 | ||
771 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | |
772 | { | |
773 | enum emulation_result er = EMULATE_DONE; | |
774 | ||
775 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, | |
776 | vcpu->arch.pending_exceptions); | |
777 | ||
778 | ++vcpu->stat.wait_exits; | |
779 | trace_kvm_exit(vcpu, WAIT_EXITS); | |
780 | if (!vcpu->arch.pending_exceptions) { | |
781 | vcpu->arch.wait = 1; | |
782 | kvm_vcpu_block(vcpu); | |
783 | ||
784 | /* If we are runnable, then definitely go off to user space to check if any | |
785 | * I/O interrupts are pending. | |
786 | */ | |
787 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | |
788 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | |
789 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | |
790 | } | |
791 | } | |
792 | ||
793 | return er; | |
794 | } | |
795 | ||
796 | /* XXXKYMA: Linux doesn't seem to use TLBR. Return EMULATE_FAIL for now so that we can catch | |
797 | * this if things ever change. | |
798 | */ | |
799 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | |
800 | { | |
801 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
802 | enum emulation_result er = EMULATE_FAIL; | |
803 | uint32_t pc = vcpu->arch.pc; | |
804 | ||
805 | printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); | |
806 | return er; | |
807 | } | |
808 | ||
809 | /* Write Guest TLB Entry @ Index */ | |
810 | enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) | |
811 | { | |
812 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
813 | int index = kvm_read_c0_guest_index(cop0); | |
814 | enum emulation_result er = EMULATE_DONE; | |
815 | struct kvm_mips_tlb *tlb = NULL; | |
816 | uint32_t pc = vcpu->arch.pc; | |
817 | ||
818 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | |
819 | printk("%s: illegal index: %d\n", __func__, index); | |
820 | printk | |
821 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | |
822 | pc, index, kvm_read_c0_guest_entryhi(cop0), | |
823 | kvm_read_c0_guest_entrylo0(cop0), | |
824 | kvm_read_c0_guest_entrylo1(cop0), | |
825 | kvm_read_c0_guest_pagemask(cop0)); | |
826 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; | |
827 | } | |
828 | ||
829 | tlb = &vcpu->arch.guest_tlb[index]; | |
830 | #if 1 | |
831 | /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */ | |
832 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | |
833 | #endif | |
834 | ||
835 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | |
836 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | |
837 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | |
838 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | |
839 | ||
840 | kvm_debug | |
841 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | |
842 | pc, index, kvm_read_c0_guest_entryhi(cop0), | |
843 | kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), | |
844 | kvm_read_c0_guest_pagemask(cop0)); | |
845 | ||
846 | return er; | |
847 | } | |
848 | ||
849 | /* Write Guest TLB Entry @ Random Index */ | |
850 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) | |
851 | { | |
852 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
853 | enum emulation_result er = EMULATE_DONE; | |
854 | struct kvm_mips_tlb *tlb = NULL; | |
855 | uint32_t pc = vcpu->arch.pc; | |
856 | int index; | |
857 | ||
858 | #if 1 | |
859 | get_random_bytes(&index, sizeof(index)); | |
860 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); | |
861 | #else | |
862 | index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; | |
863 | #endif | |
864 | ||
865 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | |
866 | printk("%s: illegal index: %d\n", __func__, index); | |
867 | return EMULATE_FAIL; | |
868 | } | |
869 | ||
870 | tlb = &vcpu->arch.guest_tlb[index]; | |
871 | ||
872 | #if 1 | |
873 | /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */ | |
874 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | |
875 | #endif | |
876 | ||
877 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | |
878 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | |
879 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | |
880 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | |
881 | ||
882 | kvm_debug | |
883 | ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", | |
884 | pc, index, kvm_read_c0_guest_entryhi(cop0), | |
885 | kvm_read_c0_guest_entrylo0(cop0), | |
886 | kvm_read_c0_guest_entrylo1(cop0)); | |
887 | ||
888 | return er; | |
889 | } | |
890 | ||
891 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | |
892 | { | |
893 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
894 | long entryhi = kvm_read_c0_guest_entryhi(cop0); | |
895 | enum emulation_result er = EMULATE_DONE; | |
896 | uint32_t pc = vcpu->arch.pc; | |
897 | int index = -1; | |
898 | ||
899 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | |
900 | ||
901 | kvm_write_c0_guest_index(cop0, index); | |
902 | ||
903 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, | |
904 | index); | |
905 | ||
906 | return er; | |
907 | } | |
908 | ||
909 | enum emulation_result | |
910 | kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |
911 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
912 | { | |
913 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
914 | enum emulation_result er = EMULATE_DONE; | |
915 | int32_t rt, rd, copz, sel, co_bit, op; | |
916 | uint32_t pc = vcpu->arch.pc; | |
917 | unsigned long curr_pc; | |
918 | ||
919 | /* | |
920 | * Update PC and hold onto current PC in case there is | |
921 | * an error and we want to roll back the PC | |
922 | */ | |
923 | curr_pc = vcpu->arch.pc; | |
924 | er = update_pc(vcpu, cause); | |
925 | if (er == EMULATE_FAIL) { | |
926 | return er; | |
927 | } | |
928 | ||
929 | copz = (inst >> 21) & 0x1f; | |
930 | rt = (inst >> 16) & 0x1f; | |
931 | rd = (inst >> 11) & 0x1f; | |
932 | sel = inst & 0x7; | |
933 | co_bit = (inst >> 25) & 1; | |
934 | ||
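/*
 * Decode example (illustrative): inst = 0x40084800 is
 * "mfc0 t0, c0_count" (mfc0 $8, $9, 0). The extraction above gives
 * copz = 0 (mfc_op), rt = 8, rd = 9 (MIPS_CP0_COUNT), sel = 0 and
 * co_bit = 0, so it is handled by the mfc_op arm below.
 */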
e685c689 SL |
935 | if (co_bit) { |
936 | op = (inst) & 0xff; | |
937 | ||
938 | switch (op) { | |
939 | case tlbr_op: /* Read indexed TLB entry */ | |
940 | er = kvm_mips_emul_tlbr(vcpu); | |
941 | break; | |
942 | case tlbwi_op: /* Write indexed */ | |
943 | er = kvm_mips_emul_tlbwi(vcpu); | |
944 | break; | |
945 | case tlbwr_op: /* Write random */ | |
946 | er = kvm_mips_emul_tlbwr(vcpu); | |
947 | break; | |
948 | case tlbp_op: /* TLB Probe */ | |
949 | er = kvm_mips_emul_tlbp(vcpu); | |
950 | break; | |
951 | case rfe_op: | |
952 | printk("!!!COP0_RFE!!!\n"); | |
953 | break; | |
954 | case eret_op: | |
955 | er = kvm_mips_emul_eret(vcpu); | |
956 | goto dont_update_pc; | |
957 | break; | |
958 | case wait_op: | |
959 | er = kvm_mips_emul_wait(vcpu); | |
960 | break; | |
961 | } | |
962 | } else { | |
963 | switch (copz) { | |
964 | case mfc_op: | |
965 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | |
966 | cop0->stat[rd][sel]++; | |
967 | #endif | |
968 | /* Get reg */ | |
969 | if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | |
e30492bb | 970 | vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); |
e685c689 SL |
971 | } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { |
972 | vcpu->arch.gprs[rt] = 0x0; | |
973 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
974 | kvm_mips_trans_mfc0(inst, opc, vcpu); | |
975 | #endif | |
976 | } | |
977 | else { | |
978 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | |
979 | ||
980 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
981 | kvm_mips_trans_mfc0(inst, opc, vcpu); | |
982 | #endif | |
983 | } | |
984 | ||
985 | kvm_debug | |
986 | ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", | |
987 | pc, rd, sel, rt, vcpu->arch.gprs[rt]); | |
988 | ||
989 | break; | |
990 | ||
991 | case dmfc_op: | |
992 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | |
993 | break; | |
994 | ||
995 | case mtc_op: | |
996 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | |
997 | cop0->stat[rd][sel]++; | |
998 | #endif | |
999 | if ((rd == MIPS_CP0_TLB_INDEX) | |
1000 | && (vcpu->arch.gprs[rt] >= | |
1001 | KVM_MIPS_GUEST_TLB_SIZE)) { | |
1002 | printk("Invalid TLB Index: %ld", | |
1003 | vcpu->arch.gprs[rt]); | |
1004 | er = EMULATE_FAIL; | |
1005 | break; | |
1006 | } | |
1007 | #define C0_EBASE_CORE_MASK 0xff | |
1008 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { | |
1009 | /* Preserve CORE number */ | |
1010 | kvm_change_c0_guest_ebase(cop0, | |
1011 | ~(C0_EBASE_CORE_MASK), | |
1012 | vcpu->arch.gprs[rt]); | |
1013 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", | |
1014 | kvm_read_c0_guest_ebase(cop0)); | |
1015 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | |
48c4ac97 DD |
1016 | uint32_t nasid = |
1017 | vcpu->arch.gprs[rt] & ASID_MASK; | |
e685c689 SL |
1018 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) |
1019 | && | |
48c4ac97 DD |
1020 | ((kvm_read_c0_guest_entryhi(cop0) & |
1021 | ASID_MASK) != nasid)) { | |
e685c689 SL |
1022 | |
1023 | kvm_debug | |
1024 | ("MTCz, change ASID from %#lx to %#lx\n", | |
48c4ac97 DD |
1025 | kvm_read_c0_guest_entryhi(cop0) & |
1026 | ASID_MASK, | |
1027 | vcpu->arch.gprs[rt] & ASID_MASK); | |
e685c689 SL |
1028 | |
1029 | /* Blow away the shadow host TLBs */ | |
1030 | kvm_mips_flush_host_tlb(1); | |
1031 | } | |
1032 | kvm_write_c0_guest_entryhi(cop0, | |
1033 | vcpu->arch.gprs[rt]); | |
1034 | } | |
1035 | /* Are we writing to COUNT? */ | |
1036 | else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | |
e30492bb | 1037 | kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); |
e685c689 SL |
1038 | goto done; |
1039 | } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { | |
1040 | kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", | |
1041 | pc, kvm_read_c0_guest_compare(cop0), | |
1042 | vcpu->arch.gprs[rt]); | |
1043 | ||
1044 | /* If we are writing to COMPARE */ | |
1045 | /* Clear pending timer interrupt, if any */ | |
1046 | kvm_mips_callbacks->dequeue_timer_int(vcpu); | |
e30492bb JH |
1047 | kvm_mips_write_compare(vcpu, |
1048 | vcpu->arch.gprs[rt]); | |
e685c689 SL |
1049 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { |
1050 | kvm_write_c0_guest_status(cop0, | |
1051 | vcpu->arch.gprs[rt]); | |
1052 | /* Make sure that CU1 and NMI bits are never set */ | |
1053 | kvm_clear_c0_guest_status(cop0, | |
1054 | (ST0_CU1 | ST0_NMI)); | |
1055 | ||
1056 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1057 | kvm_mips_trans_mtc0(inst, opc, vcpu); | |
1058 | #endif | |
e30492bb JH |
1059 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
1060 | uint32_t old_cause, new_cause; | |
1061 | old_cause = kvm_read_c0_guest_cause(cop0); | |
1062 | new_cause = vcpu->arch.gprs[rt]; | |
1063 | /* Update R/W bits */ | |
1064 | kvm_change_c0_guest_cause(cop0, 0x08800300, | |
1065 | new_cause); | |
1066 | /* DC bit enabling/disabling timer? */ | |
1067 | if ((old_cause ^ new_cause) & CAUSEF_DC) { | |
1068 | if (new_cause & CAUSEF_DC) | |
1069 | kvm_mips_count_disable_cause(vcpu); | |
1070 | else | |
1071 | kvm_mips_count_enable_cause(vcpu); | |
1072 | } | |
e685c689 SL |
1073 | } else { |
1074 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; | |
1075 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1076 | kvm_mips_trans_mtc0(inst, opc, vcpu); | |
1077 | #endif | |
1078 | } | |
1079 | ||
1080 | kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, | |
1081 | rd, sel, cop0->reg[rd][sel]); | |
1082 | break; | |
1083 | ||
1084 | case dmtc_op: | |
1085 | printk | |
1086 | ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", | |
1087 | vcpu->arch.pc, rt, rd, sel); | |
1088 | er = EMULATE_FAIL; | |
1089 | break; | |
1090 | ||
1091 | case mfmcz_op: | |
1092 | #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS | |
1093 | cop0->stat[MIPS_CP0_STATUS][0]++; | |
1094 | #endif | |
1095 | if (rt != 0) { | |
1096 | vcpu->arch.gprs[rt] = | |
1097 | kvm_read_c0_guest_status(cop0); | |
1098 | } | |
1099 | /* EI */ | |
1100 | if (inst & 0x20) { | |
1101 | kvm_debug("[%#lx] mfmcz_op: EI\n", | |
1102 | vcpu->arch.pc); | |
1103 | kvm_set_c0_guest_status(cop0, ST0_IE); | |
1104 | } else { | |
1105 | kvm_debug("[%#lx] mfmcz_op: DI\n", | |
1106 | vcpu->arch.pc); | |
1107 | kvm_clear_c0_guest_status(cop0, ST0_IE); | |
1108 | } | |
1109 | ||
1110 | break; | |
1111 | ||
1112 | case wrpgpr_op: | |
1113 | { | |
1114 | uint32_t css = | |
1115 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; | |
1116 | uint32_t pss = | |
1117 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; | |
1118 | /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] == 0 */ | |
1119 | if (css || pss) { | |
1120 | er = EMULATE_FAIL; | |
1121 | break; | |
1122 | } | |
1123 | kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, | |
1124 | vcpu->arch.gprs[rt]); | |
1125 | vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; | |
1126 | } | |
1127 | break; | |
1128 | default: | |
1129 | printk | |
1130 | ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", | |
1131 | vcpu->arch.pc, copz); | |
1132 | er = EMULATE_FAIL; | |
1133 | break; | |
1134 | } | |
1135 | } | |
1136 | ||
1137 | done: | |
1138 | /* | |
1139 | * Rollback PC only if emulation was unsuccessful | |
1140 | */ | |
1141 | if (er == EMULATE_FAIL) { | |
1142 | vcpu->arch.pc = curr_pc; | |
1143 | } | |
1144 | ||
1145 | dont_update_pc: | |
1146 | /* | |
1147 | * This is for special instructions whose emulation | |
1148 | * updates the PC, so do not overwrite the PC under | |
1149 | * any circumstances | |
1150 | */ | |
1151 | ||
1152 | return er; | |
1153 | } | |
1154 | ||
1155 | enum emulation_result | |
1156 | kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | |
1157 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1158 | { | |
1159 | enum emulation_result er = EMULATE_DO_MMIO; | |
1160 | int32_t op, base, rt, offset; | |
1161 | uint32_t bytes; | |
1162 | void *data = run->mmio.data; | |
1163 | unsigned long curr_pc; | |
1164 | ||
1165 | /* | |
1166 | * Update PC and hold onto current PC in case there is | |
1167 | * an error and we want to roll back the PC | |
1168 | */ | |
1169 | curr_pc = vcpu->arch.pc; | |
1170 | er = update_pc(vcpu, cause); | |
1171 | if (er == EMULATE_FAIL) | |
1172 | return er; | |
1173 | ||
1174 | rt = (inst >> 16) & 0x1f; | |
1175 | base = (inst >> 21) & 0x1f; | |
1176 | offset = inst & 0xffff; | |
1177 | op = (inst >> 26) & 0x3f; | |
1178 | ||
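/*
 * Decode example (illustrative): inst = 0xa0880000 is "sb t0, 0(a0)"
 * (sb $8, 0($4)): op = 0x28 (sb_op), rt = 8, base = 4, offset = 0.
 * The sb_op arm below then builds a 1-byte MMIO write at the guest
 * physical address translated from the faulting CP0_BadVAddr.
 */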
1179 | switch (op) { | |
1180 | case sb_op: | |
1181 | bytes = 1; | |
1182 | if (bytes > sizeof(run->mmio.data)) { | |
1183 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1184 | run->mmio.len); | |
1185 | } | |
1186 | run->mmio.phys_addr = | |
1187 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1188 | host_cp0_badvaddr); | |
1189 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1190 | er = EMULATE_FAIL; | |
1191 | break; | |
1192 | } | |
1193 | run->mmio.len = bytes; | |
1194 | run->mmio.is_write = 1; | |
1195 | vcpu->mmio_needed = 1; | |
1196 | vcpu->mmio_is_write = 1; | |
1197 | *(u8 *) data = vcpu->arch.gprs[rt]; | |
1198 | kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1199 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], | |
1200 | *(uint8_t *) data); | |
1201 | ||
1202 | break; | |
1203 | ||
1204 | case sw_op: | |
1205 | bytes = 4; | |
1206 | if (bytes > sizeof(run->mmio.data)) { | |
1207 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1208 | run->mmio.len); | |
1209 | } | |
1210 | run->mmio.phys_addr = | |
1211 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1212 | host_cp0_badvaddr); | |
1213 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1214 | er = EMULATE_FAIL; | |
1215 | break; | |
1216 | } | |
1217 | ||
1218 | run->mmio.len = bytes; | |
1219 | run->mmio.is_write = 1; | |
1220 | vcpu->mmio_needed = 1; | |
1221 | vcpu->mmio_is_write = 1; | |
1222 | *(uint32_t *) data = vcpu->arch.gprs[rt]; | |
1223 | ||
1224 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1225 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
1226 | vcpu->arch.gprs[rt], *(uint32_t *) data); | |
1227 | break; | |
1228 | ||
1229 | case sh_op: | |
1230 | bytes = 2; | |
1231 | if (bytes > sizeof(run->mmio.data)) { | |
1232 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1233 | run->mmio.len); | |
1234 | } | |
1235 | run->mmio.phys_addr = | |
1236 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1237 | host_cp0_badvaddr); | |
1238 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1239 | er = EMULATE_FAIL; | |
1240 | break; | |
1241 | } | |
1242 | ||
1243 | run->mmio.len = bytes; | |
1244 | run->mmio.is_write = 1; | |
1245 | vcpu->mmio_needed = 1; | |
1246 | vcpu->mmio_is_write = 1; | |
1247 | *(uint16_t *) data = vcpu->arch.gprs[rt]; | |
1248 | ||
1249 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", | |
1250 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | |
1251 | vcpu->arch.gprs[rt], *(uint16_t *) data); | |
1252 | break; | |
1253 | ||
1254 | default: | |
1255 | printk("Store not yet supported"); | |
1256 | er = EMULATE_FAIL; | |
1257 | break; | |
1258 | } | |
1259 | ||
1260 | /* | |
1261 | * Rollback PC if emulation was unsuccessful | |
1262 | */ | |
1263 | if (er == EMULATE_FAIL) { | |
1264 | vcpu->arch.pc = curr_pc; | |
1265 | } | |
1266 | ||
1267 | return er; | |
1268 | } | |
1269 | ||
1270 | enum emulation_result | |
1271 | kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | |
1272 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1273 | { | |
1274 | enum emulation_result er = EMULATE_DO_MMIO; | |
1275 | int32_t op, base, rt, offset; | |
1276 | uint32_t bytes; | |
1277 | ||
1278 | rt = (inst >> 16) & 0x1f; | |
1279 | base = (inst >> 21) & 0x1f; | |
1280 | offset = inst & 0xffff; | |
1281 | op = (inst >> 26) & 0x3f; | |
1282 | ||
1283 | vcpu->arch.pending_load_cause = cause; | |
1284 | vcpu->arch.io_gpr = rt; | |
1285 | ||
1286 | switch (op) { | |
1287 | case lw_op: | |
1288 | bytes = 4; | |
1289 | if (bytes > sizeof(run->mmio.data)) { | |
1290 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1291 | run->mmio.len); | |
1292 | er = EMULATE_FAIL; | |
1293 | break; | |
1294 | } | |
1295 | run->mmio.phys_addr = | |
1296 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1297 | host_cp0_badvaddr); | |
1298 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1299 | er = EMULATE_FAIL; | |
1300 | break; | |
1301 | } | |
1302 | ||
1303 | run->mmio.len = bytes; | |
1304 | run->mmio.is_write = 0; | |
1305 | vcpu->mmio_needed = 1; | |
1306 | vcpu->mmio_is_write = 0; | |
1307 | break; | |
1308 | ||
1309 | case lh_op: | |
1310 | case lhu_op: | |
1311 | bytes = 2; | |
1312 | if (bytes > sizeof(run->mmio.data)) { | |
1313 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1314 | run->mmio.len); | |
1315 | er = EMULATE_FAIL; | |
1316 | break; | |
1317 | } | |
1318 | run->mmio.phys_addr = | |
1319 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1320 | host_cp0_badvaddr); | |
1321 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1322 | er = EMULATE_FAIL; | |
1323 | break; | |
1324 | } | |
1325 | ||
1326 | run->mmio.len = bytes; | |
1327 | run->mmio.is_write = 0; | |
1328 | vcpu->mmio_needed = 1; | |
1329 | vcpu->mmio_is_write = 0; | |
1330 | ||
1331 | if (op == lh_op) | |
1332 | vcpu->mmio_needed = 2; | |
1333 | else | |
1334 | vcpu->mmio_needed = 1; | |
1335 | ||
1336 | break; | |
1337 | ||
1338 | case lbu_op: | |
1339 | case lb_op: | |
1340 | bytes = 1; | |
1341 | if (bytes > sizeof(run->mmio.data)) { | |
1342 | kvm_err("%s: bad MMIO length: %d\n", __func__, | |
1343 | run->mmio.len); | |
1344 | er = EMULATE_FAIL; | |
1345 | break; | |
1346 | } | |
1347 | run->mmio.phys_addr = | |
1348 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | |
1349 | host_cp0_badvaddr); | |
1350 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | |
1351 | er = EMULATE_FAIL; | |
1352 | break; | |
1353 | } | |
1354 | ||
1355 | run->mmio.len = bytes; | |
1356 | run->mmio.is_write = 0; | |
1357 | vcpu->mmio_is_write = 0; | |
1358 | ||
1359 | if (op == lb_op) | |
1360 | vcpu->mmio_needed = 2; | |
1361 | else | |
1362 | vcpu->mmio_needed = 1; | |
1363 | ||
1364 | break; | |
1365 | ||
1366 | default: | |
1367 | printk("Load not yet supported"); | |
1368 | er = EMULATE_FAIL; | |
1369 | break; | |
1370 | } | |
1371 | ||
1372 | return er; | |
1373 | } | |
1374 | ||
1375 | int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |
1376 | { | |
1377 | unsigned long offset = (va & ~PAGE_MASK); | |
1378 | struct kvm *kvm = vcpu->kvm; | |
1379 | unsigned long pa; | |
1380 | gfn_t gfn; | |
1381 | pfn_t pfn; | |
1382 | ||
1383 | gfn = va >> PAGE_SHIFT; | |
1384 | ||
1385 | if (gfn >= kvm->arch.guest_pmap_npages) { | |
1386 | printk("%s: Invalid gfn: %#llx\n", __func__, gfn); | |
1387 | kvm_mips_dump_host_tlbs(); | |
1388 | kvm_arch_vcpu_dump_regs(vcpu); | |
1389 | return -1; | |
1390 | } | |
1391 | pfn = kvm->arch.guest_pmap[gfn]; | |
1392 | pa = (pfn << PAGE_SHIFT) | offset; | |
1393 | ||
1394 | printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); | |
1395 | ||
facaaec1 | 1396 | local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32); |
e685c689 SL |
1397 | return 0; |
1398 | } | |
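/*
 * Address arithmetic example (illustrative, hypothetical pfn): for
 * va = 0x00401234 with 4 KiB pages, gfn = 0x401 and offset = 0x234; if
 * guest_pmap[0x401] were 0x8765, then pa = (0x8765 << PAGE_SHIFT) | 0x234
 * = 0x8765234, and the line is flushed through its CKSEG0 alias.
 */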
1399 | ||
1400 | #define MIPS_CACHE_OP_INDEX_INV 0x0 | |
1401 | #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 | |
1402 | #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 | |
1403 | #define MIPS_CACHE_OP_IMP 0x3 | |
1404 | #define MIPS_CACHE_OP_HIT_INV 0x4 | |
1405 | #define MIPS_CACHE_OP_FILL_WB_INV 0x5 | |
1406 | #define MIPS_CACHE_OP_HIT_HB 0x6 | |
1407 | #define MIPS_CACHE_OP_FETCH_LOCK 0x7 | |
1408 | ||
1409 | #define MIPS_CACHE_ICACHE 0x0 | |
1410 | #define MIPS_CACHE_DCACHE 0x1 | |
1411 | #define MIPS_CACHE_SEC 0x3 | |
1412 | ||
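/*
 * Decode example (illustrative): the 5-bit CACHE op field is operation
 * (bits 20:18) and cache selector (bits 17:16). "cache 0x15, 0(a0)", i.e.
 * Hit_Writeback_Inv_D (0b10101), decodes below as op = 5
 * (MIPS_CACHE_OP_FILL_WB_INV) on cache = 1 (MIPS_CACHE_DCACHE), one of the
 * combinations handled after skip_fault.
 */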
1413 | enum emulation_result | |
1414 | kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |
1415 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1416 | { | |
1417 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1418 | extern void (*r4k_blast_dcache) (void); | |
1419 | extern void (*r4k_blast_icache) (void); | |
1420 | enum emulation_result er = EMULATE_DONE; | |
1421 | int32_t offset, cache, op_inst, op, base; | |
1422 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1423 | unsigned long va; | |
1424 | unsigned long curr_pc; | |
1425 | ||
1426 | /* | |
1427 | * Update PC and hold onto current PC in case there is | |
1428 | * an error and we want to roll back the PC | |
1429 | */ | |
1430 | curr_pc = vcpu->arch.pc; | |
1431 | er = update_pc(vcpu, cause); | |
1432 | if (er == EMULATE_FAIL) | |
1433 | return er; | |
1434 | ||
1435 | base = (inst >> 21) & 0x1f; | |
1436 | op_inst = (inst >> 16) & 0x1f; | |
1437 | offset = inst & 0xffff; | |
1438 | cache = (inst >> 16) & 0x3; | |
1439 | op = (inst >> 18) & 0x7; | |
1440 | ||
1441 | va = arch->gprs[base] + offset; | |
1442 | ||
1443 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | |
1444 | cache, op, base, arch->gprs[base], offset); | |
1445 | ||
1446 | /* Treat INDEX_INV as a nop; Linux issues it on startup to invalidate | |
1447 | * the caches entirely by stepping through all the ways/indexes. | |
1448 | */ | |
1449 | if (op == MIPS_CACHE_OP_INDEX_INV) { | |
1450 | kvm_debug | |
1451 | ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | |
1452 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | |
1453 | arch->gprs[base], offset); | |
1454 | ||
1455 | if (cache == MIPS_CACHE_DCACHE) | |
1456 | r4k_blast_dcache(); | |
1457 | else if (cache == MIPS_CACHE_ICACHE) | |
1458 | r4k_blast_icache(); | |
1459 | else { | |
1460 | printk("%s: unsupported CACHE INDEX operation\n", | |
1461 | __func__); | |
1462 | return EMULATE_FAIL; | |
1463 | } | |
1464 | ||
1465 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1466 | kvm_mips_trans_cache_index(inst, opc, vcpu); | |
1467 | #endif | |
1468 | goto done; | |
1469 | } | |
1470 | ||
1471 | preempt_disable(); | |
1472 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | |
1473 | ||
1474 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { | |
1475 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); | |
1476 | } | |
1477 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || | |
1478 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | |
1479 | int index; | |
1480 | ||
1481 | /* If an entry already exists then skip */ | |
1482 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { | |
1483 | goto skip_fault; | |
1484 | } | |
1485 | ||
1486 | /* If the address is not in the guest TLB, give the guest a TLB miss fault; the | |
1487 | * resulting guest handler will do the right thing. | |
1488 | */ | |
1489 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | |
48c4ac97 DD |
1490 | (kvm_read_c0_guest_entryhi |
1491 | (cop0) & ASID_MASK)); | |
e685c689 SL |
1492 | |
1493 | if (index < 0) { | |
1494 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | |
1495 | vcpu->arch.host_cp0_badvaddr = va; | |
1496 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, | |
1497 | vcpu); | |
1498 | preempt_enable(); | |
1499 | goto dont_update_pc; | |
1500 | } else { | |
1501 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | |
1502 | /* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */ | |
1503 | if (!TLB_IS_VALID(*tlb, va)) { | |
1504 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | |
1505 | run, vcpu); | |
1506 | preempt_enable(); | |
1507 | goto dont_update_pc; | |
1508 | } else { | |
1509 | /* We fault an entry from the guest tlb to the shadow host TLB */ | |
1510 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, | |
1511 | NULL, | |
1512 | NULL); | |
1513 | } | |
1514 | } | |
1515 | } else { | |
1516 | printk | |
1517 | ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | |
1518 | cache, op, base, arch->gprs[base], offset); | |
1519 | er = EMULATE_FAIL; | |
1520 | preempt_enable(); | |
1521 | goto dont_update_pc; | |
1522 | ||
1523 | } | |
1524 | ||
1525 | skip_fault: | |
1526 | /* XXXKYMA: Only the subset of cache ops used by Linux is supported */ | |
1527 | if (cache == MIPS_CACHE_DCACHE | |
1528 | && (op == MIPS_CACHE_OP_FILL_WB_INV | |
1529 | || op == MIPS_CACHE_OP_HIT_INV)) { | |
1530 | flush_dcache_line(va); | |
1531 | ||
1532 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1533 | /* Replace the CACHE instruction with a SYNCI; not identical, but it avoids a trap */ | |
1534 | kvm_mips_trans_cache_va(inst, opc, vcpu); | |
1535 | #endif | |
1536 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { | |
1537 | flush_dcache_line(va); | |
1538 | flush_icache_line(va); | |
1539 | ||
1540 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1541 | /* Replace the CACHE instruction with a SYNCI */ | |
1542 | kvm_mips_trans_cache_va(inst, opc, vcpu); | |
1543 | #endif | |
1544 | } else { | |
1545 | printk | |
1546 | ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | |
1547 | cache, op, base, arch->gprs[base], offset); | |
1548 | er = EMULATE_FAIL; | |
1549 | preempt_enable(); | |
1550 | goto dont_update_pc; | |
1551 | } | |
1552 | ||
1553 | preempt_enable(); | |
1554 | ||
1555 | dont_update_pc: | |
1556 | /* | |
1557 | * Rollback PC | |
1558 | */ | |
1559 | vcpu->arch.pc = curr_pc; | |
1560 | done: | |
1561 | return er; | |
1562 | } | |
1563 | ||
1564 | enum emulation_result | |
1565 | kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | |
1566 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1567 | { | |
1568 | enum emulation_result er = EMULATE_DONE; | |
1569 | uint32_t inst; | |
1570 | ||
1571 | /* | |
1572 | * Fetch the instruction. | |
1573 | */ | |
1574 | if (cause & CAUSEF_BD) { | |
1575 | opc += 1; | |
1576 | } | |
1577 | ||
1578 | inst = kvm_get_inst(opc, vcpu); | |
1579 | ||
1580 | switch (((union mips_instruction)inst).r_format.opcode) { | |
1581 | case cop0_op: | |
1582 | er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); | |
1583 | break; | |
1584 | case sb_op: | |
1585 | case sh_op: | |
1586 | case sw_op: | |
1587 | er = kvm_mips_emulate_store(inst, cause, run, vcpu); | |
1588 | break; | |
1589 | case lb_op: | |
1590 | case lbu_op: | |
1591 | case lhu_op: | |
1592 | case lh_op: | |
1593 | case lw_op: | |
1594 | er = kvm_mips_emulate_load(inst, cause, run, vcpu); | |
1595 | break; | |
1596 | ||
1597 | case cache_op: | |
1598 | ++vcpu->stat.cache_exits; | |
1599 | trace_kvm_exit(vcpu, CACHE_EXITS); | |
1600 | er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); | |
1601 | break; | |
1602 | ||
1603 | default: | |
1604 | printk("Instruction emulation not supported (%p/%#x)\n", opc, | |
1605 | inst); | |
1606 | kvm_arch_vcpu_dump_regs(vcpu); | |
1607 | er = EMULATE_FAIL; | |
1608 | break; | |
1609 | } | |
1610 | ||
1611 | return er; | |
1612 | } | |
1613 | ||
1614 | enum emulation_result | |
1615 | kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, | |
1616 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1617 | { | |
1618 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1619 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1620 | enum emulation_result er = EMULATE_DONE; | |
1621 | ||
1622 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1623 | /* save old pc */ | |
1624 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1625 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1626 | ||
1627 | if (cause & CAUSEF_BD) | |
1628 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1629 | else | |
1630 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1631 | ||
1632 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); | |
1633 | ||
1634 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1635 | (T_SYSCALL << CAUSEB_EXCCODE)); | |
1636 | ||
1637 | /* Set PC to the exception entry point */ | |
1638 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1639 | ||
1640 | } else { | |
1641 | printk("Trying to deliver SYSCALL when EXL is already set\n"); | |
1642 | er = EMULATE_FAIL; | |
1643 | } | |
1644 | ||
1645 | return er; | |
1646 | } | |
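| /* |
| * The delivery helpers below follow the same pattern as SYSCALL above: |
| * save the guest EPC, set Status.EXL, mirror the host Cause.BD bit so the |
| * guest sees the correct branch-delay state, write the exception code |
| * into Cause.ExcCode, and point the PC at the guest exception vector. |
| */ |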
1647 | ||
1648 | enum emulation_result | |
1649 | kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | |
1650 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1651 | { | |
1652 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1653 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1654 | enum emulation_result er = EMULATE_DONE; | |
1655 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
48c4ac97 | 1656 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1657 | |
1658 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1659 | /* save old pc */ | |
1660 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1661 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1662 | ||
1663 | if (cause & CAUSEF_BD) | |
1664 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1665 | else | |
1666 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1667 | ||
1668 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", | |
1669 | arch->pc); | |
1670 | ||
1671 | /* set pc to the exception entry point */ | |
1672 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | |
1673 | ||
1674 | } else { | |
1675 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | |
1676 | arch->pc); | |
1677 | ||
1678 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1679 | } | |
1680 | ||
1681 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1682 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | |
1683 | ||
1684 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1685 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1686 | /* XXXKYMA: is the context register used by linux??? */ | |
1687 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1688 | /* Blow away the shadow host TLBs */ | |
1689 | kvm_mips_flush_host_tlb(1); | |
1690 | ||
1691 | return er; | |
1692 | } | |
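| /* |
| * Vector selection above: with EXL clear, a TLB refill is delivered to |
| * the guest refill vector at KVM_GUEST_KSEG0 + 0x0; with EXL already set, |
| * and for the TLB invalid cases below, the general exception vector at |
| * +0x180 is used instead, matching MIPS exception semantics. |
| */ |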
1693 | ||
1694 | enum emulation_result | |
1695 | kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | |
1696 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1697 | { | |
1698 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1699 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1700 | enum emulation_result er = EMULATE_DONE; | |
1701 | unsigned long entryhi = | |
1702 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 1703 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1704 | |
1705 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1706 | /* save old pc */ | |
1707 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1708 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1709 | ||
1710 | if (cause & CAUSEF_BD) | |
1711 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1712 | else | |
1713 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1714 | ||
1715 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | |
1716 | arch->pc); | |
1717 | ||
1718 | /* set pc to the exception entry point */ | |
1719 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1720 | ||
1721 | } else { | |
1722 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | |
1723 | arch->pc); | |
1724 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1725 | } | |
1726 | ||
1727 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1728 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | |
1729 | ||
1730 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1731 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1732 | /* XXXKYMA: is the context register used by linux??? */ | |
1733 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1734 | /* Blow away the shadow host TLBs */ | |
1735 | kvm_mips_flush_host_tlb(1); | |
1736 | ||
1737 | return er; | |
1738 | } | |
1739 | ||
1740 | enum emulation_result | |
1741 | kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | |
1742 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1743 | { | |
1744 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1745 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1746 | enum emulation_result er = EMULATE_DONE; | |
1747 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 1748 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1749 | |
1750 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1751 | /* save old pc */ | |
1752 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1753 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1754 | ||
1755 | if (cause & CAUSEF_BD) | |
1756 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1757 | else | |
1758 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1759 | ||
1760 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | |
1761 | arch->pc); | |
1762 | ||
1763 | /* Set PC to the exception entry point */ | |
1764 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | |
1765 | } else { | |
1766 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | |
1767 | arch->pc); | |
1768 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1769 | } | |
1770 | ||
1771 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1772 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | |
1773 | ||
1774 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1775 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1776 | /* XXXKYMA: is the context register used by linux??? */ | |
1777 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1778 | /* Blow away the shadow host TLBs */ | |
1779 | kvm_mips_flush_host_tlb(1); | |
1780 | ||
1781 | return er; | |
1782 | } | |
1783 | ||
1784 | enum emulation_result | |
1785 | kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | |
1786 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1787 | { | |
1788 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1789 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1790 | enum emulation_result er = EMULATE_DONE; | |
1791 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 1792 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1793 | |
1794 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1795 | /* save old pc */ | |
1796 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1797 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1798 | ||
1799 | if (cause & CAUSEF_BD) | |
1800 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1801 | else | |
1802 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1803 | ||
1804 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | |
1805 | arch->pc); | |
1806 | ||
1807 | /* Set PC to the exception entry point */ | |
1808 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1809 | } else { | |
1810 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | |
1811 | arch->pc); | |
1812 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1813 | } | |
1814 | ||
1815 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1816 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | |
1817 | ||
1818 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1819 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1820 | /* XXXKYMA: is the context register used by linux??? */ | |
1821 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1822 | /* Blow away the shadow host TLBs */ | |
1823 | kvm_mips_flush_host_tlb(1); | |
1824 | ||
1825 | return er; | |
1826 | } | |
1827 | ||
1828 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | |
1829 | enum emulation_result | |
1830 | kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | |
1831 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1832 | { | |
1833 | enum emulation_result er = EMULATE_DONE; | |
e685c689 | 1834 | #ifdef DEBUG |
3d654833 JH |
1835 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1836 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
1837 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | |
1838 | int index; | |
1839 | ||
e685c689 SL |
1840 | /* |
1841 | * If the address is not in the guest TLB, we are in trouble | 
1842 | */ | |
1843 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | |
1844 | if (index < 0) { | |
1845 | /* XXXKYMA Invalidate and retry */ | |
1846 | kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); | |
1847 | kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", | |
1848 | __func__, entryhi); | |
1849 | kvm_mips_dump_guest_tlbs(vcpu); | |
1850 | kvm_mips_dump_host_tlbs(); | |
1851 | return EMULATE_FAIL; | |
1852 | } | |
1853 | #endif | |
1854 | ||
1855 | er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); | |
1856 | return er; | |
1857 | } | |
1858 | ||
1859 | enum emulation_result | |
1860 | kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | |
1861 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1862 | { | |
1863 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1864 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 1865 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1866 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1867 | enum emulation_result er = EMULATE_DONE; | |
1868 | ||
1869 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1870 | /* save old pc */ | |
1871 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1872 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1873 | ||
1874 | if (cause & CAUSEF_BD) | |
1875 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1876 | else | |
1877 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1878 | ||
1879 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | |
1880 | arch->pc); | |
1881 | ||
1882 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1883 | } else { | |
1884 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | |
1885 | arch->pc); | |
1886 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1887 | } | |
1888 | ||
1889 | kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); | |
1890 | ||
1891 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1892 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1893 | /* XXXKYMA: is the context register used by linux??? */ | |
1894 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1895 | /* Blow away the shadow host TLBs */ | |
1896 | kvm_mips_flush_host_tlb(1); | |
1897 | ||
1898 | return er; | |
1899 | } | |
1900 | ||
1901 | enum emulation_result | |
1902 | kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, | |
1903 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1904 | { | |
1905 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1906 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1907 | enum emulation_result er = EMULATE_DONE; | |
1908 | ||
1909 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1910 | /* save old pc */ | |
1911 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1912 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1913 | ||
1914 | if (cause & CAUSEF_BD) | |
1915 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1916 | else | |
1917 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1918 | ||
1919 | } | |
1920 | ||
1921 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1922 | ||
1923 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1924 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); | |
1925 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | |
1926 | ||
1927 | return er; | |
1928 | } | |
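| /* |
| * The CE field written above flags coprocessor 1 (the FPU) as the |
| * unusable coprocessor for the T_COP_UNUSABLE exception. |
| */ |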
1929 | ||
1930 | enum emulation_result | |
1931 | kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, | |
1932 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1933 | { | |
1934 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1935 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1936 | enum emulation_result er = EMULATE_DONE; | |
1937 | ||
1938 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1939 | /* save old pc */ | |
1940 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1941 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1942 | ||
1943 | if (cause & CAUSEF_BD) | |
1944 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1945 | else | |
1946 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1947 | ||
1948 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); | |
1949 | ||
1950 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1951 | (T_RES_INST << CAUSEB_EXCCODE)); | |
1952 | ||
1953 | /* Set PC to the exception entry point */ | |
1954 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1955 | ||
1956 | } else { | |
1957 | kvm_err("Trying to deliver RI when EXL is already set\n"); | |
1958 | er = EMULATE_FAIL; | |
1959 | } | |
1960 | ||
1961 | return er; | |
1962 | } | |
1963 | ||
1964 | enum emulation_result | |
1965 | kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | |
1966 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1967 | { | |
1968 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1969 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1970 | enum emulation_result er = EMULATE_DONE; | |
1971 | ||
1972 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1973 | /* save old pc */ | |
1974 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1975 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1976 | ||
1977 | if (cause & CAUSEF_BD) | |
1978 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1979 | else | |
1980 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1981 | ||
1982 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); | |
1983 | ||
1984 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1985 | (T_BREAK << CAUSEB_EXCCODE)); | |
1986 | ||
1987 | /* Set PC to the exception entry point */ | |
1988 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1989 | ||
1990 | } else { | |
1991 | printk("Trying to deliver BP when EXL is already set\n"); | |
1992 | er = EMULATE_FAIL; | |
1993 | } | |
1994 | ||
1995 | return er; | |
1996 | } | |
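| /* |
| * Note the asymmetry with the TLB helpers above: SYSCALL, RI and BP |
| * delivery bail out with EMULATE_FAIL when EXL is already set, rather |
| * than re-vectoring to +0x180. |
| */ |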
1997 | ||
1998 | /* | |
1999 | * ll/sc, rdhwr, sync emulation | |
2000 | */ | |
2001 | ||
2002 | #define OPCODE 0xfc000000 | |
2003 | #define BASE 0x03e00000 | |
2004 | #define RT 0x001f0000 | |
2005 | #define OFFSET 0x0000ffff | |
2006 | #define LL 0xc0000000 | |
2007 | #define SC 0xe0000000 | |
2008 | #define SPEC0 0x00000000 | |
2009 | #define SPEC3 0x7c000000 | |
2010 | #define RD 0x0000f800 | |
2011 | #define FUNC 0x0000003f | |
2012 | #define SYNC 0x0000000f | |
2013 | #define RDHWR 0x0000003b | |
2014 | ||
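| /* |
| * Worked decode under the masks above (standard MIPS32 encoding): |
| * "rdhwr $2, $29" assembles to 0x7c02e83b, so (inst & OPCODE) == SPEC3, |
| * (inst & FUNC) == RDHWR, rd = (inst & RD) >> 11 = 29 and |
| * rt = (inst & RT) >> 16 = 2. |
| */ |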
2015 | enum emulation_result | |
2016 | kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | |
2017 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
2018 | { | |
2019 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2020 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2021 | enum emulation_result er = EMULATE_DONE; | |
2022 | unsigned long curr_pc; | |
2023 | uint32_t inst; | |
2024 | ||
2025 | /* | |
2026 | * Update PC and hold onto current PC in case there is | |
2027 | * an error and we want to roll back the PC | 
2028 | */ | |
2029 | curr_pc = vcpu->arch.pc; | |
2030 | er = update_pc(vcpu, cause); | |
2031 | if (er == EMULATE_FAIL) | |
2032 | return er; | |
2033 | ||
2034 | /* | |
2035 | * Fetch the instruction. | |
2036 | */ | |
2037 | if (cause & CAUSEF_BD) | |
2038 | opc += 1; | |
2039 | ||
2040 | inst = kvm_get_inst(opc, vcpu); | |
2041 | ||
2042 | if (inst == KVM_INVALID_INST) { | |
2043 | printk("%s: Cannot get inst @ %p\n", __func__, opc); | |
2044 | return EMULATE_FAIL; | |
2045 | } | |
2046 | ||
2047 | if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { | |
26f4f3b5 | 2048 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); |
e685c689 SL |
2049 | int rd = (inst & RD) >> 11; |
2050 | int rt = (inst & RT) >> 16; | |
26f4f3b5 JH |
2051 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ |
2052 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { | |
2053 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", | |
2054 | rd, opc); | |
2055 | goto emulate_ri; | |
2056 | } | |
e685c689 SL |
2057 | switch (rd) { |
2058 | case 0: /* CPU number */ | |
2059 | arch->gprs[rt] = 0; | |
2060 | break; | |
2061 | case 1: /* SYNCI length */ | |
2062 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, | |
2063 | current_cpu_data.icache.linesz); | |
2064 | break; | |
2065 | case 2: /* Read count register */ | |
e30492bb | 2066 | arch->gprs[rt] = kvm_mips_read_count(vcpu); |
e685c689 SL |
2067 | break; |
2068 | case 3: /* Count register resolution */ | |
2069 | switch (current_cpu_data.cputype) { | |
2070 | case CPU_20KC: | |
2071 | case CPU_25KF: | |
2072 | arch->gprs[rt] = 1; | |
2073 | break; | |
2074 | default: | |
2075 | arch->gprs[rt] = 2; | |
2076 | } | |
2077 | break; | |
2078 | case 29: | |
e685c689 | 2079 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); |
e685c689 SL |
2080 | break; |
2081 | ||
2082 | default: | |
15505679 | 2083 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); |
26f4f3b5 | 2084 | goto emulate_ri; |
e685c689 SL |
2085 | } |
2086 | } else { | |
15505679 | 2087 | kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); |
26f4f3b5 | 2088 | goto emulate_ri; |
e685c689 SL |
2089 | } |
2090 | ||
26f4f3b5 JH |
2091 | return EMULATE_DONE; |
2092 | ||
2093 | emulate_ri: | |
e685c689 | 2094 | /* |
26f4f3b5 JH |
2095 | * Roll back the PC (if in a branch delay slot, the PC already points to | 
2096 | * the branch target) and pass the RI exception to the guest OS. | 
e685c689 | 2097 | */ |
26f4f3b5 JH |
2098 | vcpu->arch.pc = curr_pc; |
2099 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | |
e685c689 SL |
2100 | } |
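| /* |
| * The RDHWR register numbers handled above follow the MIPS32 spec: |
| * 0 = CPU number, 1 = SYNCI step, 2 = Count, 3 = Count resolution and |
| * 29 = UserLocal (commonly used for TLS). In user mode each is gated by |
| * the corresponding guest HWREna bit. |
| */ |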
2101 | ||
2102 | enum emulation_result | |
2103 | kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) | |
2104 | { | |
2105 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | |
2106 | enum emulation_result er = EMULATE_DONE; | |
2107 | unsigned long curr_pc; | |
2108 | ||
2109 | if (run->mmio.len > sizeof(*gpr)) { | |
2110 | printk("Bad MMIO length: %d", run->mmio.len); | |
2111 | er = EMULATE_FAIL; | |
2112 | goto done; | |
2113 | } | |
2114 | ||
2115 | /* | |
2116 | * Update PC and hold onto current PC in case there is | |
2117 | * an error and we want to roll back the PC | 
2118 | */ | |
2119 | curr_pc = vcpu->arch.pc; | |
2120 | er = update_pc(vcpu, vcpu->arch.pending_load_cause); | |
2121 | if (er == EMULATE_FAIL) | |
2122 | return er; | |
2123 | ||
2124 | switch (run->mmio.len) { | |
2125 | case 4: | |
2126 | *gpr = *(int32_t *) run->mmio.data; | |
2127 | break; | |
2128 | ||
2129 | case 2: | |
2130 | if (vcpu->mmio_needed == 2) | |
2131 | *gpr = *(int16_t *) run->mmio.data; | |
2132 | else | |
2133 | *gpr = *(u16 *) run->mmio.data; | 
2134 | ||
2135 | break; | |
2136 | case 1: | |
2137 | if (vcpu->mmio_needed == 2) | |
2138 | *gpr = *(int8_t *) run->mmio.data; | |
2139 | else | |
2140 | *gpr = *(u8 *) run->mmio.data; | |
2141 | break; | |
2142 | } | |
2143 | ||
2144 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | |
2145 | kvm_debug | |
2146 | ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", | |
2147 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | |
2148 | vcpu->mmio_needed); | |
2149 | ||
2150 | done: | |
2151 | return er; | |
2152 | } | |
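| /* |
| * In the switch above, mmio_needed == 2 appears to mark a signed load |
| * (lb/lh), so the data is cast through a signed type to sign-extend; |
| * other values zero-extend via u8/u16. mmio_needed itself is set by the |
| * load emulation, which is not shown here. |
| */ |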
2153 | ||
2154 | static enum emulation_result | |
2155 | kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, | |
2156 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
2157 | { | |
2158 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2159 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2160 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2161 | enum emulation_result er = EMULATE_DONE; | |
2162 | ||
2163 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2164 | /* save old pc */ | |
2165 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2166 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2167 | ||
2168 | if (cause & CAUSEF_BD) | |
2169 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2170 | else | |
2171 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2172 | ||
2173 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2174 | (exccode << CAUSEB_EXCCODE)); | |
2175 | ||
2176 | /* Set PC to the exception entry point */ | |
2177 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2178 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2179 | ||
2180 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | |
2181 | exccode, kvm_read_c0_guest_epc(cop0), | |
2182 | kvm_read_c0_guest_badvaddr(cop0)); | |
2183 | } else { | |
2184 | printk("Trying to deliver EXC when EXL is already set\n"); | |
2185 | er = EMULATE_FAIL; | |
2186 | } | |
2187 | ||
2188 | return er; | |
2189 | } | |
2190 | ||
2191 | enum emulation_result | |
2192 | kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |
2193 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
2194 | { | |
2195 | enum emulation_result er = EMULATE_DONE; | |
2196 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2197 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
2198 | ||
2199 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | |
2200 | ||
2201 | if (usermode) { | |
2202 | switch (exccode) { | |
2203 | case T_INT: | |
2204 | case T_SYSCALL: | |
2205 | case T_BREAK: | |
2206 | case T_RES_INST: | |
2207 | break; | |
2208 | ||
2209 | case T_COP_UNUSABLE: | |
2210 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) | |
2211 | er = EMULATE_PRIV_FAIL; | |
2212 | break; | |
2213 | ||
2214 | case T_TLB_MOD: | |
2215 | break; | |
2216 | ||
2217 | case T_TLB_LD_MISS: | |
2218 | /* If we are accessing guest kernel space, send an address error exception to the guest */ | 
2219 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | |
2220 | printk("%s: LD MISS @ %#lx\n", __func__, | |
2221 | badvaddr); | |
2222 | cause &= ~0xff; | |
2223 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); | |
2224 | er = EMULATE_PRIV_FAIL; | |
2225 | } | |
2226 | break; | |
2227 | ||
2228 | case T_TLB_ST_MISS: | |
2229 | /* If we are accessing guest kernel space, send an address error exception to the guest */ | 
2230 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | |
2231 | printk("%s: ST MISS @ %#lx\n", __func__, | |
2232 | badvaddr); | |
2233 | cause &= ~0xff; | |
2234 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); | |
2235 | er = EMULATE_PRIV_FAIL; | |
2236 | } | |
2237 | break; | |
2238 | ||
2239 | case T_ADDR_ERR_ST: | |
2240 | printk("%s: address error ST @ %#lx\n", __func__, | |
2241 | badvaddr); | |
2242 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | |
2243 | cause &= ~0xff; | |
2244 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); | |
2245 | } | |
2246 | er = EMULATE_PRIV_FAIL; | |
2247 | break; | |
2248 | case T_ADDR_ERR_LD: | |
2249 | printk("%s: address error LD @ %#lx\n", __func__, | |
2250 | badvaddr); | |
2251 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | |
2252 | cause &= ~0xff; | |
2253 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); | |
2254 | } | |
2255 | er = EMULATE_PRIV_FAIL; | |
2256 | break; | |
2257 | default: | |
2258 | er = EMULATE_PRIV_FAIL; | |
2259 | break; | |
2260 | } | |
2261 | } | |
2262 | ||
2263 | if (er == EMULATE_PRIV_FAIL) { | |
2264 | kvm_mips_emulate_exc(cause, opc, run, vcpu); | |
2265 | } | |
2266 | return er; | |
2267 | } | |
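| /* |
| * Note the commpage special case above: a user-mode address error on |
| * KVM_GUEST_COMMPAGE_ADDR is rewritten into a TLB miss before being |
| * reflected to the guest, presumably so the guest handles the commpage |
| * like any other unmapped page rather than as an address error. |
| */ |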
2268 | ||
2269 | /* User Address (UA) fault. This can happen if: | 
2270 | * (1) the TLB entry is not present/valid in either the guest or the shadow | 
2271 | * host TLB; in this case we pass the fault on to the guest kernel. | 
2272 | * (2) the TLB entry is present in the guest TLB but not in the shadow host | 
2273 | * TLB; in this case we inject the entry from the guest TLB into the | 
2274 | * shadow host TLB. */ | 
2275 | enum emulation_result | |
2276 | kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |
2277 | struct kvm_run *run, struct kvm_vcpu *vcpu) | |
2278 | { | |
2279 | enum emulation_result er = EMULATE_DONE; | |
2280 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2281 | unsigned long va = vcpu->arch.host_cp0_badvaddr; | |
2282 | int index; | |
2283 | ||
2284 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", | |
2285 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); | |
2286 | ||
2287 | /* KVM would not have got the exception if this entry was valid in the shadow host TLB. | 
2288 | * Check the guest TLB: if the entry is not there, send the guest an | 
2289 | * exception. The guest exception handler should then inject an entry into | 
2290 | * the guest TLB. | 
2291 | */ | |
2292 | index = kvm_mips_guest_tlb_lookup(vcpu, | |
2293 | (va & VPN2_MASK) | | |
48c4ac97 DD |
2294 | (kvm_read_c0_guest_entryhi |
2295 | (vcpu->arch.cop0) & ASID_MASK)); | |
e685c689 SL |
2296 | if (index < 0) { |
2297 | if (exccode == T_TLB_LD_MISS) { | |
2298 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); | |
2299 | } else if (exccode == T_TLB_ST_MISS) { | |
2300 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); | |
2301 | } else { | |
2302 | printk("%s: invalid exc code: %d\n", __func__, exccode); | |
2303 | er = EMULATE_FAIL; | |
2304 | } | |
2305 | } else { | |
2306 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | |
2307 | ||
2308 | /* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */ | 
2309 | if (!TLB_IS_VALID(*tlb, va)) { | |
2310 | if (exccode == T_TLB_LD_MISS) { | |
2311 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, | |
2312 | vcpu); | |
2313 | } else if (exccode == T_TLB_ST_MISS) { | |
2314 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, | |
2315 | vcpu); | |
2316 | } else { | |
2317 | printk("%s: invalid exc code: %d\n", __func__, | |
2318 | exccode); | |
2319 | er = EMULATE_FAIL; | |
2320 | } | |
2321 | } else { | |
2322 | #ifdef DEBUG | |
2323 | kvm_debug | |
2324 | ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", | |
2325 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); | |
2326 | #endif | |
2327 | /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ | |
2328 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, | |
2329 | NULL); | |
2330 | } | |
2331 | } | |
2332 | ||
2333 | return er; | |
2334 | } |