Commit | Line | Data |
---|---|---|
669e846e SL |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * KVM/MIPS: MIPS specific KVM APIs | |
7 | * | |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | |
d116e812 | 10 | */ |
669e846e SL |
11 | |
12 | #include <linux/errno.h> | |
13 | #include <linux/err.h> | |
98e91b84 | 14 | #include <linux/kdebug.h> |
669e846e SL |
15 | #include <linux/module.h> |
16 | #include <linux/vmalloc.h> | |
17 | #include <linux/fs.h> | |
18 | #include <linux/bootmem.h> | |
f798217d | 19 | #include <asm/fpu.h> |
669e846e SL |
20 | #include <asm/page.h> |
21 | #include <asm/cacheflush.h> | |
22 | #include <asm/mmu_context.h> | |
c4c6f2ca | 23 | #include <asm/pgtable.h> |
669e846e SL |
24 | |
25 | #include <linux/kvm_host.h> | |
26 | ||
d7d5b05f DCZ |
27 | #include "interrupt.h" |
28 | #include "commpage.h" | |
669e846e SL |
29 | |
30 | #define CREATE_TRACE_POINTS | |
31 | #include "trace.h" | |
32 | ||
33 | #ifndef VECTORSPACING | |
34 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | |
35 | #endif | |
36 | ||
d116e812 | 37 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) |
669e846e | 38 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
d116e812 DCZ |
39 | { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, |
40 | { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, | |
41 | { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, | |
42 | { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, | |
43 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, | |
44 | { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, | |
45 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, | |
46 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, | |
47 | { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, | |
48 | { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, | |
49 | { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, | |
50 | { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, | |
51 | { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, | |
0a560427 | 52 | { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, |
c2537ed9 | 53 | { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, |
1c0cd66a | 54 | { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, |
c2537ed9 | 55 | { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, |
d116e812 | 56 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, |
f7819512 | 57 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, |
62bea5bf | 58 | { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, |
3491caf2 | 59 | { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU }, |
d116e812 | 60 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, |
669e846e SL |
61 | {NULL} |
62 | }; | |
63 | ||
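The VCPU_STAT() macro above stores only a byte offset into struct kvm_vcpu; the generic KVM debugfs code later adds that offset to each vCPU pointer to read the counter behind names such as "wait" or "tlbmod". A minimal sketch of that lookup, not part of this file and assuming the 32-bit counter width used by MIPS in this era:

```c
/* Illustrative sketch only: resolve a debugfs_entries[] item against a vCPU.
 * Relies on the types already pulled in via <linux/kvm_host.h> above. */
static u32 kvm_mips_read_vcpu_stat(struct kvm_vcpu *vcpu,
				   const struct kvm_stats_debugfs_item *item)
{
	/* item->offset was filled in by VCPU_STAT(x), i.e.
	 * offsetof(struct kvm_vcpu, stat.x) */
	return *(u32 *)((char *)vcpu + item->offset);
}
```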
64 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) | |
65 | { | |
66 | int i; | |
d116e812 | 67 | |
669e846e SL |
68 | for_each_possible_cpu(i) { |
69 | vcpu->arch.guest_kernel_asid[i] = 0; | |
70 | vcpu->arch.guest_user_asid[i] = 0; | |
71 | } | |
d116e812 | 72 | |
669e846e SL |
73 | return 0; |
74 | } | |
75 | ||
d116e812 DCZ |
76 | /* |
77 | * XXXKYMA: We are simulating a processor that has the WII bit set in | |
78 | * Config7, so we are "runnable" if interrupts are pending | |
669e846e SL |
79 | */ |
80 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | |
81 | { | |
82 | return !!(vcpu->arch.pending_exceptions); | |
83 | } | |
84 | ||
85 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | |
86 | { | |
87 | return 1; | |
88 | } | |
89 | ||
13a34e06 | 90 | int kvm_arch_hardware_enable(void) |
669e846e SL |
91 | { |
92 | return 0; | |
93 | } | |
94 | ||
669e846e SL |
95 | int kvm_arch_hardware_setup(void) |
96 | { | |
97 | return 0; | |
98 | } | |
99 | ||
669e846e SL |
100 | void kvm_arch_check_processor_compat(void *rtn) |
101 | { | |
d98403a5 | 102 | *(int *)rtn = 0; |
669e846e SL |
103 | } |
104 | ||
105 | static void kvm_mips_init_tlbs(struct kvm *kvm) | |
106 | { | |
107 | unsigned long wired; | |
108 | ||
d116e812 DCZ |
109 | /* |
110 | * Add a wired entry to the TLB; it is used to map the commpage to | |
111 | * the Guest kernel | |
112 | */ | |
669e846e SL |
113 | wired = read_c0_wired(); |
114 | write_c0_wired(wired + 1); | |
115 | mtc0_tlbw_hazard(); | |
116 | kvm->arch.commpage_tlb = wired; | |
117 | ||
118 | kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), | |
119 | kvm->arch.commpage_tlb); | |
120 | } | |
121 | ||
122 | static void kvm_mips_init_vm_percpu(void *arg) | |
123 | { | |
124 | struct kvm *kvm = (struct kvm *)arg; | |
125 | ||
126 | kvm_mips_init_tlbs(kvm); | |
127 | kvm_mips_callbacks->vm_init(kvm); | |
128 | ||
129 | } | |
130 | ||
131 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |
132 | { | |
133 | if (atomic_inc_return(&kvm_mips_instance) == 1) { | |
6e95bfd2 JH |
134 | kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n", |
135 | __func__); | |
669e846e SL |
136 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); |
137 | } | |
138 | ||
669e846e SL |
139 | return 0; |
140 | } | |
141 | ||
142 | void kvm_mips_free_vcpus(struct kvm *kvm) | |
143 | { | |
144 | unsigned int i; | |
145 | struct kvm_vcpu *vcpu; | |
146 | ||
147 | /* Put the pages we reserved for the guest pmap */ | |
148 | for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { | |
149 | if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) | |
150 | kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); | |
151 | } | |
c6c0a663 | 152 | kfree(kvm->arch.guest_pmap); |
669e846e SL |
153 | |
154 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
155 | kvm_arch_vcpu_free(vcpu); | |
156 | } | |
157 | ||
158 | mutex_lock(&kvm->lock); | |
159 | ||
160 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | |
161 | kvm->vcpus[i] = NULL; | |
162 | ||
163 | atomic_set(&kvm->online_vcpus, 0); | |
164 | ||
165 | mutex_unlock(&kvm->lock); | |
166 | } | |
167 | ||
669e846e SL |
168 | static void kvm_mips_uninit_tlbs(void *arg) |
169 | { | |
170 | /* Restore wired count */ | |
171 | write_c0_wired(0); | |
172 | mtc0_tlbw_hazard(); | |
173 | /* Clear out all the TLBs */ | |
174 | kvm_local_flush_tlb_all(); | |
175 | } | |
176 | ||
177 | void kvm_arch_destroy_vm(struct kvm *kvm) | |
178 | { | |
179 | kvm_mips_free_vcpus(kvm); | |
180 | ||
181 | /* If this is the last instance, restore wired count */ | |
182 | if (atomic_dec_return(&kvm_mips_instance) == 0) { | |
6e95bfd2 JH |
183 | kvm_debug("%s: last KVM instance, restoring TLB parameters\n", |
184 | __func__); | |
669e846e SL |
185 | on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); |
186 | } | |
187 | } | |
188 | ||
d116e812 DCZ |
189 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, |
190 | unsigned long arg) | |
669e846e | 191 | { |
ed829857 | 192 | return -ENOIOCTLCMD; |
669e846e SL |
193 | } |
194 | ||
5587027c AK |
195 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
196 | unsigned long npages) | |
669e846e SL |
197 | { |
198 | return 0; | |
199 | } | |
200 | ||
201 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
d116e812 | 202 | struct kvm_memory_slot *memslot, |
09170a49 | 203 | const struct kvm_userspace_memory_region *mem, |
d116e812 | 204 | enum kvm_mr_change change) |
669e846e SL |
205 | { |
206 | return 0; | |
207 | } | |
208 | ||
209 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
09170a49 | 210 | const struct kvm_userspace_memory_region *mem, |
d116e812 | 211 | const struct kvm_memory_slot *old, |
f36f3f28 | 212 | const struct kvm_memory_slot *new, |
d116e812 | 213 | enum kvm_mr_change change) |
669e846e SL |
214 | { |
215 | unsigned long npages = 0; | |
d98403a5 | 216 | int i; |
669e846e SL |
217 | |
218 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", | |
219 | __func__, kvm, mem->slot, mem->guest_phys_addr, | |
220 | mem->memory_size, mem->userspace_addr); | |
221 | ||
222 | /* Setup Guest PMAP table */ | |
223 | if (!kvm->arch.guest_pmap) { | |
224 | if (mem->slot == 0) | |
225 | npages = mem->memory_size >> PAGE_SHIFT; | |
226 | ||
227 | if (npages) { | |
228 | kvm->arch.guest_pmap_npages = npages; | |
229 | kvm->arch.guest_pmap = | |
230 | kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); | |
231 | ||
232 | if (!kvm->arch.guest_pmap) { | |
f7fdcb60 | 233 | kvm_err("Failed to allocate guest PMAP\n"); |
d98403a5 | 234 | return; |
669e846e SL |
235 | } |
236 | ||
6e95bfd2 JH |
237 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", |
238 | npages, kvm->arch.guest_pmap); | |
669e846e SL |
239 | |
240 | /* Now setup the page table */ | |
d116e812 | 241 | for (i = 0; i < npages; i++) |
669e846e | 242 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; |
669e846e SL |
243 | } |
244 | } | |
669e846e SL |
245 | } |
246 | ||
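kvm_arch_commit_memory_region() above sizes the guest_pmap from whatever userspace registers as memory slot 0, so the VMM has to describe guest RAM before the guest can run. A hedged userspace sketch of the standard KVM_SET_USER_MEMORY_REGION call that reaches this path (vm_fd, ram_ptr and ram_size are assumptions, not taken from this file):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: register a userspace buffer as guest physical RAM
 * in slot 0, starting at guest physical address 0. */
static int register_guest_ram(int vm_fd, void *ram_ptr, __u64 ram_size)
{
	struct kvm_userspace_memory_region mem = {
		.slot            = 0,
		.flags           = 0,
		.guest_phys_addr = 0,
		.memory_size     = ram_size,
		.userspace_addr  = (unsigned long)ram_ptr,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
```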
669e846e SL |
247 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
248 | { | |
669e846e SL |
249 | int err, size, offset; |
250 | void *gebase; | |
251 | int i; | |
252 | ||
253 | struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); | |
254 | ||
255 | if (!vcpu) { | |
256 | err = -ENOMEM; | |
257 | goto out; | |
258 | } | |
259 | ||
260 | err = kvm_vcpu_init(vcpu, kvm, id); | |
261 | ||
262 | if (err) | |
263 | goto out_free_cpu; | |
264 | ||
6e95bfd2 | 265 | kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); |
669e846e | 266 | |
d116e812 DCZ |
267 | /* |
268 | * Allocate space for host mode exception handlers that handle | |
669e846e SL |
269 | * guest mode exits |
270 | */ | |
d116e812 | 271 | if (cpu_has_veic || cpu_has_vint) |
669e846e | 272 | size = 0x200 + VECTORSPACING * 64; |
d116e812 | 273 | else |
7006e2df | 274 | size = 0x4000; |
669e846e SL |
275 | |
276 | /* Save Linux EBASE */ | |
277 | vcpu->arch.host_ebase = (void *)read_c0_ebase(); | |
278 | ||
279 | gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); | |
280 | ||
281 | if (!gebase) { | |
282 | err = -ENOMEM; | |
585bb8f9 | 283 | goto out_uninit_cpu; |
669e846e | 284 | } |
6e95bfd2 JH |
285 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", |
286 | ALIGN(size, PAGE_SIZE), gebase); | |
669e846e SL |
287 | |
288 | /* Save new ebase */ | |
289 | vcpu->arch.guest_ebase = gebase; | |
290 | ||
291 | /* Copy L1 Guest Exception handler to correct offset */ | |
292 | ||
293 | /* TLB Refill, EXL = 0 */ | |
294 | memcpy(gebase, mips32_exception, | |
295 | mips32_exceptionEnd - mips32_exception); | |
296 | ||
297 | /* General Exception Entry point */ | |
298 | memcpy(gebase + 0x180, mips32_exception, | |
299 | mips32_exceptionEnd - mips32_exception); | |
300 | ||
301 | /* For vectored interrupts poke the exception code @ all offsets 0-7 */ | |
302 | for (i = 0; i < 8; i++) { | |
303 | kvm_debug("L1 Vectored handler @ %p\n", | |
304 | gebase + 0x200 + (i * VECTORSPACING)); | |
305 | memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, | |
306 | mips32_exceptionEnd - mips32_exception); | |
307 | } | |
308 | ||
309 | /* General handler, relocate to unmapped space for sanity's sake */ | |
310 | offset = 0x2000; | |
6e95bfd2 JH |
311 | kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n", |
312 | gebase + offset, | |
313 | mips32_GuestExceptionEnd - mips32_GuestException); | |
669e846e SL |
314 | |
315 | memcpy(gebase + offset, mips32_GuestException, | |
316 | mips32_GuestExceptionEnd - mips32_GuestException); | |
317 | ||
797179bc JH |
318 | #ifdef MODULE |
319 | offset += mips32_GuestExceptionEnd - mips32_GuestException; | |
320 | memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, | |
321 | __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); | |
322 | vcpu->arch.vcpu_run = gebase + offset; | |
323 | #else | |
324 | vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; | |
325 | #endif | |
326 | ||
669e846e | 327 | /* Invalidate the icache for these ranges */ |
facaaec1 JH |
328 | local_flush_icache_range((unsigned long)gebase, |
329 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | |
669e846e | 330 | |
d116e812 DCZ |
331 | /* |
332 | * Allocate comm page for guest kernel; a TLB entry will be reserved for | |
333 | * mapping GVA @ 0xFFFF8000 to this page | |
334 | */ | |
669e846e SL |
335 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); |
336 | ||
337 | if (!vcpu->arch.kseg0_commpage) { | |
338 | err = -ENOMEM; | |
339 | goto out_free_gebase; | |
340 | } | |
341 | ||
6e95bfd2 | 342 | kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); |
669e846e SL |
343 | kvm_mips_commpage_init(vcpu); |
344 | ||
345 | /* Init */ | |
346 | vcpu->arch.last_sched_cpu = -1; | |
347 | ||
348 | /* Start off the timer */ | |
e30492bb | 349 | kvm_mips_init_count(vcpu); |
669e846e SL |
350 | |
351 | return vcpu; | |
352 | ||
353 | out_free_gebase: | |
354 | kfree(gebase); | |
355 | ||
585bb8f9 JH |
356 | out_uninit_cpu: |
357 | kvm_vcpu_uninit(vcpu); | |
358 | ||
669e846e SL |
359 | out_free_cpu: |
360 | kfree(vcpu); | |
361 | ||
362 | out: | |
363 | return ERR_PTR(err); | |
364 | } | |
365 | ||
366 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | |
367 | { | |
368 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | |
369 | ||
370 | kvm_vcpu_uninit(vcpu); | |
371 | ||
372 | kvm_mips_dump_stats(vcpu); | |
373 | ||
c6c0a663 JH |
374 | kfree(vcpu->arch.guest_ebase); |
375 | kfree(vcpu->arch.kseg0_commpage); | |
8c9eb041 | 376 | kfree(vcpu); |
669e846e SL |
377 | } |
378 | ||
379 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |
380 | { | |
381 | kvm_arch_vcpu_free(vcpu); | |
382 | } | |
383 | ||
d116e812 DCZ |
384 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
385 | struct kvm_guest_debug *dbg) | |
669e846e | 386 | { |
ed829857 | 387 | return -ENOIOCTLCMD; |
669e846e SL |
388 | } |
389 | ||
390 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |
391 | { | |
392 | int r = 0; | |
393 | sigset_t sigsaved; | |
394 | ||
395 | if (vcpu->sigset_active) | |
396 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | |
397 | ||
398 | if (vcpu->mmio_needed) { | |
399 | if (!vcpu->mmio_is_write) | |
400 | kvm_mips_complete_mmio_load(vcpu, run); | |
401 | vcpu->mmio_needed = 0; | |
402 | } | |
403 | ||
f798217d JH |
404 | lose_fpu(1); |
405 | ||
044f0f03 | 406 | local_irq_disable(); |
669e846e SL |
407 | /* Check if we have any exceptions/interrupts pending */ |
408 | kvm_mips_deliver_interrupts(vcpu, | |
409 | kvm_read_c0_guest_cause(vcpu->arch.cop0)); | |
410 | ||
ccf73aaf | 411 | __kvm_guest_enter(); |
669e846e | 412 | |
c4c6f2ca JH |
413 | /* Disable hardware page table walking while in guest */ |
414 | htw_stop(); | |
415 | ||
797179bc | 416 | r = vcpu->arch.vcpu_run(run, vcpu); |
669e846e | 417 | |
c4c6f2ca JH |
418 | /* Re-enable HTW before enabling interrupts */ |
419 | htw_start(); | |
420 | ||
ccf73aaf | 421 | __kvm_guest_exit(); |
669e846e SL |
422 | local_irq_enable(); |
423 | ||
424 | if (vcpu->sigset_active) | |
425 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | |
426 | ||
427 | return r; | |
428 | } | |
429 | ||
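kvm_arch_vcpu_ioctl_run() above is reached through the generic KVM_RUN ioctl; the mmio_needed handling is what completes a pending MMIO load that userspace filled in on the previous exit. A hedged sketch of the matching userspace loop (vcpu_fd, mmap_size and the exit handling are illustrative, not taken from this file):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Illustrative only: minimal KVM_RUN loop. mmap_size is the value returned
 * by KVM_GET_VCPU_MMAP_SIZE on the /dev/kvm fd. */
static void run_vcpu(int vcpu_fd, size_t mmap_size)
{
	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		ioctl(vcpu_fd, KVM_RUN, 0);

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/* For a load, fill run->mmio.data here; the next
			 * KVM_RUN lets kvm_mips_complete_mmio_load() push it
			 * into the guest register. */
			break;
		default:
			return;
		}
	}
}
```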
d116e812 DCZ |
430 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
431 | struct kvm_mips_interrupt *irq) | |
669e846e SL |
432 | { |
433 | int intr = (int)irq->irq; | |
434 | struct kvm_vcpu *dvcpu = NULL; | |
435 | ||
436 | if (intr == 3 || intr == -3 || intr == 4 || intr == -4) | |
437 | kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, | |
438 | (int)intr); | |
439 | ||
440 | if (irq->cpu == -1) | |
441 | dvcpu = vcpu; | |
442 | else | |
443 | dvcpu = vcpu->kvm->vcpus[irq->cpu]; | |
444 | ||
445 | if (intr == 2 || intr == 3 || intr == 4) { | |
446 | kvm_mips_callbacks->queue_io_int(dvcpu, irq); | |
447 | ||
448 | } else if (intr == -2 || intr == -3 || intr == -4) { | |
449 | kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); | |
450 | } else { | |
451 | kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, | |
452 | irq->cpu, irq->irq); | |
453 | return -EINVAL; | |
454 | } | |
455 | ||
456 | dvcpu->arch.wait = 0; | |
457 | ||
8577370f MT |
458 | if (swait_active(&dvcpu->wq)) |
459 | swake_up(&dvcpu->wq); | |
669e846e SL |
460 | |
461 | return 0; | |
462 | } | |
463 | ||
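kvm_vcpu_ioctl_interrupt() above accepts IRQ numbers 2 to 4 to queue an I/O interrupt and their negatives to dequeue it, with cpu == -1 meaning the calling vCPU. A hedged userspace sketch of the KVM_INTERRUPT ioctl that lands here (vcpu_fd is an assumption):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: raise and then lower guest IRQ 2 on the current vCPU. */
static void pulse_irq2(int vcpu_fd)
{
	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };

	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	/* queue the interrupt */

	irq.irq = -2;				/* negative value dequeues it */
	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
```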
d116e812 DCZ |
464 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
465 | struct kvm_mp_state *mp_state) | |
669e846e | 466 | { |
ed829857 | 467 | return -ENOIOCTLCMD; |
669e846e SL |
468 | } |
469 | ||
d116e812 DCZ |
470 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
471 | struct kvm_mp_state *mp_state) | |
669e846e | 472 | { |
ed829857 | 473 | return -ENOIOCTLCMD; |
669e846e SL |
474 | } |
475 | ||
4c73fb2b DD |
476 | static u64 kvm_mips_get_one_regs[] = { |
477 | KVM_REG_MIPS_R0, | |
478 | KVM_REG_MIPS_R1, | |
479 | KVM_REG_MIPS_R2, | |
480 | KVM_REG_MIPS_R3, | |
481 | KVM_REG_MIPS_R4, | |
482 | KVM_REG_MIPS_R5, | |
483 | KVM_REG_MIPS_R6, | |
484 | KVM_REG_MIPS_R7, | |
485 | KVM_REG_MIPS_R8, | |
486 | KVM_REG_MIPS_R9, | |
487 | KVM_REG_MIPS_R10, | |
488 | KVM_REG_MIPS_R11, | |
489 | KVM_REG_MIPS_R12, | |
490 | KVM_REG_MIPS_R13, | |
491 | KVM_REG_MIPS_R14, | |
492 | KVM_REG_MIPS_R15, | |
493 | KVM_REG_MIPS_R16, | |
494 | KVM_REG_MIPS_R17, | |
495 | KVM_REG_MIPS_R18, | |
496 | KVM_REG_MIPS_R19, | |
497 | KVM_REG_MIPS_R20, | |
498 | KVM_REG_MIPS_R21, | |
499 | KVM_REG_MIPS_R22, | |
500 | KVM_REG_MIPS_R23, | |
501 | KVM_REG_MIPS_R24, | |
502 | KVM_REG_MIPS_R25, | |
503 | KVM_REG_MIPS_R26, | |
504 | KVM_REG_MIPS_R27, | |
505 | KVM_REG_MIPS_R28, | |
506 | KVM_REG_MIPS_R29, | |
507 | KVM_REG_MIPS_R30, | |
508 | KVM_REG_MIPS_R31, | |
509 | ||
510 | KVM_REG_MIPS_HI, | |
511 | KVM_REG_MIPS_LO, | |
512 | KVM_REG_MIPS_PC, | |
513 | ||
514 | KVM_REG_MIPS_CP0_INDEX, | |
515 | KVM_REG_MIPS_CP0_CONTEXT, | |
7767b7d2 | 516 | KVM_REG_MIPS_CP0_USERLOCAL, |
4c73fb2b DD |
517 | KVM_REG_MIPS_CP0_PAGEMASK, |
518 | KVM_REG_MIPS_CP0_WIRED, | |
16fd5c1d | 519 | KVM_REG_MIPS_CP0_HWRENA, |
4c73fb2b | 520 | KVM_REG_MIPS_CP0_BADVADDR, |
f8be02da | 521 | KVM_REG_MIPS_CP0_COUNT, |
4c73fb2b | 522 | KVM_REG_MIPS_CP0_ENTRYHI, |
f8be02da | 523 | KVM_REG_MIPS_CP0_COMPARE, |
4c73fb2b DD |
524 | KVM_REG_MIPS_CP0_STATUS, |
525 | KVM_REG_MIPS_CP0_CAUSE, | |
fb6df0cd | 526 | KVM_REG_MIPS_CP0_EPC, |
1068eaaf | 527 | KVM_REG_MIPS_CP0_PRID, |
4c73fb2b DD |
528 | KVM_REG_MIPS_CP0_CONFIG, |
529 | KVM_REG_MIPS_CP0_CONFIG1, | |
530 | KVM_REG_MIPS_CP0_CONFIG2, | |
531 | KVM_REG_MIPS_CP0_CONFIG3, | |
c771607a JH |
532 | KVM_REG_MIPS_CP0_CONFIG4, |
533 | KVM_REG_MIPS_CP0_CONFIG5, | |
4c73fb2b | 534 | KVM_REG_MIPS_CP0_CONFIG7, |
f8239342 JH |
535 | KVM_REG_MIPS_CP0_ERROREPC, |
536 | ||
537 | KVM_REG_MIPS_COUNT_CTL, | |
538 | KVM_REG_MIPS_COUNT_RESUME, | |
f74a8e22 | 539 | KVM_REG_MIPS_COUNT_HZ, |
4c73fb2b DD |
540 | }; |
541 | ||
542 | static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |
543 | const struct kvm_one_reg *reg) | |
544 | { | |
4c73fb2b | 545 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
379245cd | 546 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; |
f8be02da | 547 | int ret; |
4c73fb2b | 548 | s64 v; |
ab86bd60 | 549 | s64 vs[2]; |
379245cd | 550 | unsigned int idx; |
4c73fb2b DD |
551 | |
552 | switch (reg->id) { | |
379245cd | 553 | /* General purpose registers */ |
4c73fb2b DD |
554 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: |
555 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; | |
556 | break; | |
557 | case KVM_REG_MIPS_HI: | |
558 | v = (long)vcpu->arch.hi; | |
559 | break; | |
560 | case KVM_REG_MIPS_LO: | |
561 | v = (long)vcpu->arch.lo; | |
562 | break; | |
563 | case KVM_REG_MIPS_PC: | |
564 | v = (long)vcpu->arch.pc; | |
565 | break; | |
566 | ||
379245cd JH |
567 | /* Floating point registers */ |
568 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | |
569 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
570 | return -EINVAL; | |
571 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | |
572 | /* Odd singles in top of even double when FR=0 */ | |
573 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | |
574 | v = get_fpr32(&fpu->fpr[idx], 0); | |
575 | else | |
576 | v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); | |
577 | break; | |
578 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | |
579 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
580 | return -EINVAL; | |
581 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | |
582 | /* Can't access odd doubles in FR=0 mode */ | |
583 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | |
584 | return -EINVAL; | |
585 | v = get_fpr64(&fpu->fpr[idx], 0); | |
586 | break; | |
587 | case KVM_REG_MIPS_FCR_IR: | |
588 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
589 | return -EINVAL; | |
590 | v = boot_cpu_data.fpu_id; | |
591 | break; | |
592 | case KVM_REG_MIPS_FCR_CSR: | |
593 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
594 | return -EINVAL; | |
595 | v = fpu->fcr31; | |
596 | break; | |
597 | ||
ab86bd60 JH |
598 | /* MIPS SIMD Architecture (MSA) registers */ |
599 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | |
600 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
601 | return -EINVAL; | |
602 | /* Can't access MSA registers in FR=0 mode */ | |
603 | if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) | |
604 | return -EINVAL; | |
605 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | |
606 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | |
607 | /* least significant byte first */ | |
608 | vs[0] = get_fpr64(&fpu->fpr[idx], 0); | |
609 | vs[1] = get_fpr64(&fpu->fpr[idx], 1); | |
610 | #else | |
611 | /* most significant byte first */ | |
612 | vs[0] = get_fpr64(&fpu->fpr[idx], 1); | |
613 | vs[1] = get_fpr64(&fpu->fpr[idx], 0); | |
614 | #endif | |
615 | break; | |
616 | case KVM_REG_MIPS_MSA_IR: | |
617 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
618 | return -EINVAL; | |
619 | v = boot_cpu_data.msa_id; | |
620 | break; | |
621 | case KVM_REG_MIPS_MSA_CSR: | |
622 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
623 | return -EINVAL; | |
624 | v = fpu->msacsr; | |
625 | break; | |
626 | ||
379245cd | 627 | /* Co-processor 0 registers */ |
4c73fb2b DD |
628 | case KVM_REG_MIPS_CP0_INDEX: |
629 | v = (long)kvm_read_c0_guest_index(cop0); | |
630 | break; | |
631 | case KVM_REG_MIPS_CP0_CONTEXT: | |
632 | v = (long)kvm_read_c0_guest_context(cop0); | |
633 | break; | |
7767b7d2 JH |
634 | case KVM_REG_MIPS_CP0_USERLOCAL: |
635 | v = (long)kvm_read_c0_guest_userlocal(cop0); | |
636 | break; | |
4c73fb2b DD |
637 | case KVM_REG_MIPS_CP0_PAGEMASK: |
638 | v = (long)kvm_read_c0_guest_pagemask(cop0); | |
639 | break; | |
640 | case KVM_REG_MIPS_CP0_WIRED: | |
641 | v = (long)kvm_read_c0_guest_wired(cop0); | |
642 | break; | |
16fd5c1d JH |
643 | case KVM_REG_MIPS_CP0_HWRENA: |
644 | v = (long)kvm_read_c0_guest_hwrena(cop0); | |
645 | break; | |
4c73fb2b DD |
646 | case KVM_REG_MIPS_CP0_BADVADDR: |
647 | v = (long)kvm_read_c0_guest_badvaddr(cop0); | |
648 | break; | |
649 | case KVM_REG_MIPS_CP0_ENTRYHI: | |
650 | v = (long)kvm_read_c0_guest_entryhi(cop0); | |
651 | break; | |
f8be02da JH |
652 | case KVM_REG_MIPS_CP0_COMPARE: |
653 | v = (long)kvm_read_c0_guest_compare(cop0); | |
654 | break; | |
4c73fb2b DD |
655 | case KVM_REG_MIPS_CP0_STATUS: |
656 | v = (long)kvm_read_c0_guest_status(cop0); | |
657 | break; | |
658 | case KVM_REG_MIPS_CP0_CAUSE: | |
659 | v = (long)kvm_read_c0_guest_cause(cop0); | |
660 | break; | |
fb6df0cd JH |
661 | case KVM_REG_MIPS_CP0_EPC: |
662 | v = (long)kvm_read_c0_guest_epc(cop0); | |
663 | break; | |
1068eaaf JH |
664 | case KVM_REG_MIPS_CP0_PRID: |
665 | v = (long)kvm_read_c0_guest_prid(cop0); | |
666 | break; | |
4c73fb2b DD |
667 | case KVM_REG_MIPS_CP0_CONFIG: |
668 | v = (long)kvm_read_c0_guest_config(cop0); | |
669 | break; | |
670 | case KVM_REG_MIPS_CP0_CONFIG1: | |
671 | v = (long)kvm_read_c0_guest_config1(cop0); | |
672 | break; | |
673 | case KVM_REG_MIPS_CP0_CONFIG2: | |
674 | v = (long)kvm_read_c0_guest_config2(cop0); | |
675 | break; | |
676 | case KVM_REG_MIPS_CP0_CONFIG3: | |
677 | v = (long)kvm_read_c0_guest_config3(cop0); | |
678 | break; | |
c771607a JH |
679 | case KVM_REG_MIPS_CP0_CONFIG4: |
680 | v = (long)kvm_read_c0_guest_config4(cop0); | |
681 | break; | |
682 | case KVM_REG_MIPS_CP0_CONFIG5: | |
683 | v = (long)kvm_read_c0_guest_config5(cop0); | |
684 | break; | |
4c73fb2b DD |
685 | case KVM_REG_MIPS_CP0_CONFIG7: |
686 | v = (long)kvm_read_c0_guest_config7(cop0); | |
687 | break; | |
e93d4c15 JH |
688 | case KVM_REG_MIPS_CP0_ERROREPC: |
689 | v = (long)kvm_read_c0_guest_errorepc(cop0); | |
690 | break; | |
f8be02da JH |
691 | /* registers to be handled specially */ |
692 | case KVM_REG_MIPS_CP0_COUNT: | |
f8239342 JH |
693 | case KVM_REG_MIPS_COUNT_CTL: |
694 | case KVM_REG_MIPS_COUNT_RESUME: | |
f74a8e22 | 695 | case KVM_REG_MIPS_COUNT_HZ: |
f8be02da JH |
696 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); |
697 | if (ret) | |
698 | return ret; | |
699 | break; | |
4c73fb2b DD |
700 | default: |
701 | return -EINVAL; | |
702 | } | |
681865d4 DD |
703 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
704 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | |
d116e812 | 705 | |
681865d4 DD |
706 | return put_user(v, uaddr64); |
707 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | |
708 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | |
709 | u32 v32 = (u32)v; | |
d116e812 | 710 | |
681865d4 | 711 | return put_user(v32, uaddr32); |
ab86bd60 JH |
712 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { |
713 | void __user *uaddr = (void __user *)(long)reg->addr; | |
714 | ||
0178fd7d | 715 | return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; |
681865d4 DD |
716 | } else { |
717 | return -EINVAL; | |
718 | } | |
4c73fb2b DD |
719 | } |
720 | ||
721 | static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |
722 | const struct kvm_one_reg *reg) | |
723 | { | |
4c73fb2b | 724 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
379245cd JH |
725 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; |
726 | s64 v; | |
ab86bd60 | 727 | s64 vs[2]; |
379245cd | 728 | unsigned int idx; |
4c73fb2b | 729 | |
681865d4 DD |
730 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
731 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | |
732 | ||
733 | if (get_user(v, uaddr64) != 0) | |
734 | return -EFAULT; | |
735 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | |
736 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | |
737 | s32 v32; | |
738 | ||
739 | if (get_user(v32, uaddr32) != 0) | |
740 | return -EFAULT; | |
741 | v = (s64)v32; | |
ab86bd60 JH |
742 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { |
743 | void __user *uaddr = (void __user *)(long)reg->addr; | |
744 | ||
0178fd7d | 745 | return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; |
681865d4 DD |
746 | } else { |
747 | return -EINVAL; | |
748 | } | |
4c73fb2b DD |
749 | |
750 | switch (reg->id) { | |
379245cd | 751 | /* General purpose registers */ |
4c73fb2b DD |
752 | case KVM_REG_MIPS_R0: |
753 | /* Silently ignore requests to set $0 */ | |
754 | break; | |
755 | case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: | |
756 | vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; | |
757 | break; | |
758 | case KVM_REG_MIPS_HI: | |
759 | vcpu->arch.hi = v; | |
760 | break; | |
761 | case KVM_REG_MIPS_LO: | |
762 | vcpu->arch.lo = v; | |
763 | break; | |
764 | case KVM_REG_MIPS_PC: | |
765 | vcpu->arch.pc = v; | |
766 | break; | |
767 | ||
379245cd JH |
768 | /* Floating point registers */ |
769 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | |
770 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
771 | return -EINVAL; | |
772 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | |
773 | /* Odd singles in top of even double when FR=0 */ | |
774 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | |
775 | set_fpr32(&fpu->fpr[idx], 0, v); | |
776 | else | |
777 | set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); | |
778 | break; | |
779 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | |
780 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
781 | return -EINVAL; | |
782 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | |
783 | /* Can't access odd doubles in FR=0 mode */ | |
784 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | |
785 | return -EINVAL; | |
786 | set_fpr64(&fpu->fpr[idx], 0, v); | |
787 | break; | |
788 | case KVM_REG_MIPS_FCR_IR: | |
789 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
790 | return -EINVAL; | |
791 | /* Read-only */ | |
792 | break; | |
793 | case KVM_REG_MIPS_FCR_CSR: | |
794 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | |
795 | return -EINVAL; | |
796 | fpu->fcr31 = v; | |
797 | break; | |
798 | ||
ab86bd60 JH |
799 | /* MIPS SIMD Architecture (MSA) registers */ |
800 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | |
801 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
802 | return -EINVAL; | |
803 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | |
804 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | |
805 | /* least significant byte first */ | |
806 | set_fpr64(&fpu->fpr[idx], 0, vs[0]); | |
807 | set_fpr64(&fpu->fpr[idx], 1, vs[1]); | |
808 | #else | |
809 | /* most significant byte first */ | |
810 | set_fpr64(&fpu->fpr[idx], 1, vs[0]); | |
811 | set_fpr64(&fpu->fpr[idx], 0, vs[1]); | |
812 | #endif | |
813 | break; | |
814 | case KVM_REG_MIPS_MSA_IR: | |
815 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
816 | return -EINVAL; | |
817 | /* Read-only */ | |
818 | break; | |
819 | case KVM_REG_MIPS_MSA_CSR: | |
820 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | |
821 | return -EINVAL; | |
822 | fpu->msacsr = v; | |
823 | break; | |
824 | ||
379245cd | 825 | /* Co-processor 0 registers */ |
4c73fb2b DD |
826 | case KVM_REG_MIPS_CP0_INDEX: |
827 | kvm_write_c0_guest_index(cop0, v); | |
828 | break; | |
829 | case KVM_REG_MIPS_CP0_CONTEXT: | |
830 | kvm_write_c0_guest_context(cop0, v); | |
831 | break; | |
7767b7d2 JH |
832 | case KVM_REG_MIPS_CP0_USERLOCAL: |
833 | kvm_write_c0_guest_userlocal(cop0, v); | |
834 | break; | |
4c73fb2b DD |
835 | case KVM_REG_MIPS_CP0_PAGEMASK: |
836 | kvm_write_c0_guest_pagemask(cop0, v); | |
837 | break; | |
838 | case KVM_REG_MIPS_CP0_WIRED: | |
839 | kvm_write_c0_guest_wired(cop0, v); | |
840 | break; | |
16fd5c1d JH |
841 | case KVM_REG_MIPS_CP0_HWRENA: |
842 | kvm_write_c0_guest_hwrena(cop0, v); | |
843 | break; | |
4c73fb2b DD |
844 | case KVM_REG_MIPS_CP0_BADVADDR: |
845 | kvm_write_c0_guest_badvaddr(cop0, v); | |
846 | break; | |
847 | case KVM_REG_MIPS_CP0_ENTRYHI: | |
848 | kvm_write_c0_guest_entryhi(cop0, v); | |
849 | break; | |
850 | case KVM_REG_MIPS_CP0_STATUS: | |
851 | kvm_write_c0_guest_status(cop0, v); | |
852 | break; | |
fb6df0cd JH |
853 | case KVM_REG_MIPS_CP0_EPC: |
854 | kvm_write_c0_guest_epc(cop0, v); | |
855 | break; | |
1068eaaf JH |
856 | case KVM_REG_MIPS_CP0_PRID: |
857 | kvm_write_c0_guest_prid(cop0, v); | |
858 | break; | |
4c73fb2b DD |
859 | case KVM_REG_MIPS_CP0_ERROREPC: |
860 | kvm_write_c0_guest_errorepc(cop0, v); | |
861 | break; | |
f8be02da JH |
862 | /* registers to be handled specially */ |
863 | case KVM_REG_MIPS_CP0_COUNT: | |
864 | case KVM_REG_MIPS_CP0_COMPARE: | |
e30492bb | 865 | case KVM_REG_MIPS_CP0_CAUSE: |
c771607a JH |
866 | case KVM_REG_MIPS_CP0_CONFIG: |
867 | case KVM_REG_MIPS_CP0_CONFIG1: | |
868 | case KVM_REG_MIPS_CP0_CONFIG2: | |
869 | case KVM_REG_MIPS_CP0_CONFIG3: | |
870 | case KVM_REG_MIPS_CP0_CONFIG4: | |
871 | case KVM_REG_MIPS_CP0_CONFIG5: | |
f8239342 JH |
872 | case KVM_REG_MIPS_COUNT_CTL: |
873 | case KVM_REG_MIPS_COUNT_RESUME: | |
f74a8e22 | 874 | case KVM_REG_MIPS_COUNT_HZ: |
f8be02da | 875 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); |
4c73fb2b DD |
876 | default: |
877 | return -EINVAL; | |
878 | } | |
879 | return 0; | |
880 | } | |
881 | ||
5fafd874 JH |
882 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
883 | struct kvm_enable_cap *cap) | |
884 | { | |
885 | int r = 0; | |
886 | ||
887 | if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) | |
888 | return -EINVAL; | |
889 | if (cap->flags) | |
890 | return -EINVAL; | |
891 | if (cap->args[0]) | |
892 | return -EINVAL; | |
893 | ||
894 | switch (cap->cap) { | |
895 | case KVM_CAP_MIPS_FPU: | |
896 | vcpu->arch.fpu_enabled = true; | |
897 | break; | |
d952bd07 JH |
898 | case KVM_CAP_MIPS_MSA: |
899 | vcpu->arch.msa_enabled = true; | |
900 | break; | |
5fafd874 JH |
901 | default: |
902 | r = -EINVAL; | |
903 | break; | |
904 | } | |
905 | ||
906 | return r; | |
907 | } | |
908 | ||
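kvm_vcpu_ioctl_enable_cap() above only flips fpu_enabled/msa_enabled, so the guest FPU and MSA stay unavailable unless userspace opts in per vCPU. A hedged sketch of that opt-in (vm_fd and vcpu_fd are assumptions; flags and args must stay zero, as checked above):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: enable the guest FPU if the host advertises it. */
static void enable_guest_fpu(int vm_fd, int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) > 0)
		ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
```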
d116e812 DCZ |
909 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, |
910 | unsigned long arg) | |
669e846e SL |
911 | { |
912 | struct kvm_vcpu *vcpu = filp->private_data; | |
913 | void __user *argp = (void __user *)arg; | |
914 | long r; | |
669e846e SL |
915 | |
916 | switch (ioctl) { | |
4c73fb2b DD |
917 | case KVM_SET_ONE_REG: |
918 | case KVM_GET_ONE_REG: { | |
919 | struct kvm_one_reg reg; | |
d116e812 | 920 | |
4c73fb2b DD |
921 | if (copy_from_user(®, argp, sizeof(reg))) |
922 | return -EFAULT; | |
923 | if (ioctl == KVM_SET_ONE_REG) | |
924 | return kvm_mips_set_reg(vcpu, ®); | |
925 | else | |
926 | return kvm_mips_get_reg(vcpu, ®); | |
927 | } | |
928 | case KVM_GET_REG_LIST: { | |
929 | struct kvm_reg_list __user *user_list = argp; | |
930 | u64 __user *reg_dest; | |
931 | struct kvm_reg_list reg_list; | |
932 | unsigned n; | |
933 | ||
934 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | |
935 | return -EFAULT; | |
936 | n = reg_list.n; | |
937 | reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); | |
938 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | |
939 | return -EFAULT; | |
940 | if (n < reg_list.n) | |
941 | return -E2BIG; | |
942 | reg_dest = user_list->reg; | |
943 | if (copy_to_user(reg_dest, kvm_mips_get_one_regs, | |
944 | sizeof(kvm_mips_get_one_regs))) | |
945 | return -EFAULT; | |
946 | return 0; | |
947 | } | |
669e846e SL |
948 | case KVM_NMI: |
949 | /* Treat the NMI as a CPU reset */ | |
950 | r = kvm_mips_reset_vcpu(vcpu); | |
951 | break; | |
952 | case KVM_INTERRUPT: | |
953 | { | |
954 | struct kvm_mips_interrupt irq; | |
d116e812 | 955 | |
669e846e SL |
956 | r = -EFAULT; |
957 | if (copy_from_user(&irq, argp, sizeof(irq))) | |
958 | goto out; | |
959 | ||
669e846e SL |
960 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, |
961 | irq.irq); | |
962 | ||
963 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); | |
964 | break; | |
965 | } | |
5fafd874 JH |
966 | case KVM_ENABLE_CAP: { |
967 | struct kvm_enable_cap cap; | |
968 | ||
969 | r = -EFAULT; | |
970 | if (copy_from_user(&cap, argp, sizeof(cap))) | |
971 | goto out; | |
972 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | |
973 | break; | |
974 | } | |
669e846e | 975 | default: |
4c73fb2b | 976 | r = -ENOIOCTLCMD; |
669e846e SL |
977 | } |
978 | ||
979 | out: | |
980 | return r; | |
981 | } | |
982 | ||
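The KVM_GET_ONE_REG/KVM_SET_ONE_REG cases above move one register at a time through struct kvm_one_reg, with the transfer width encoded in the register ID (u32, u64, or 128 bits for the MSA vector registers). A hedged userspace sketch that reads and rewrites the guest PC (vcpu_fd and the new PC value are illustrative):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: KVM_REG_MIPS_PC is a 64-bit register, so it is passed
 * through a __u64 buffer pointed to by reg.addr. Returns the old PC. */
static __u64 swap_guest_pc(int vcpu_fd, __u64 new_pc)
{
	__u64 pc, old_pc;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (__u64)(unsigned long)&pc,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* read current PC into pc */
	old_pc = pc;

	pc = new_pc;
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* write the new value back */

	return old_pc;
}
```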
d116e812 | 983 | /* Get (and clear) the dirty memory log for a memory slot. */ |
669e846e SL |
984 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
985 | { | |
9f6b8029 | 986 | struct kvm_memslots *slots; |
669e846e SL |
987 | struct kvm_memory_slot *memslot; |
988 | unsigned long ga, ga_end; | |
989 | int is_dirty = 0; | |
990 | int r; | |
991 | unsigned long n; | |
992 | ||
993 | mutex_lock(&kvm->slots_lock); | |
994 | ||
995 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | |
996 | if (r) | |
997 | goto out; | |
998 | ||
999 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1000 | if (is_dirty) { | |
9f6b8029 PB |
1001 | slots = kvm_memslots(kvm); |
1002 | memslot = id_to_memslot(slots, log->slot); | |
669e846e SL |
1003 | |
1004 | ga = memslot->base_gfn << PAGE_SHIFT; | |
1005 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | |
1006 | ||
6ad78a5c DCZ |
1007 | kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, |
1008 | ga_end); | |
669e846e SL |
1009 | |
1010 | n = kvm_dirty_bitmap_bytes(memslot); | |
1011 | memset(memslot->dirty_bitmap, 0, n); | |
1012 | } | |
1013 | ||
1014 | r = 0; | |
1015 | out: | |
1016 | mutex_unlock(&kvm->slots_lock); | |
1017 | return r; | |
1018 | ||
1019 | } | |
1020 | ||
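kvm_vm_ioctl_get_dirty_log() above hands userspace a bitmap with one bit per dirtied page of the slot and then clears the kernel-side state. A hedged sketch of the caller (vm_fd and the bitmap buffer are assumed to be set up elsewhere, sized to the slot):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: fetch the dirty-page bitmap for memory slot 0.
 * bitmap must hold at least one bit per page of the slot, rounded up. */
static int fetch_dirty_log(int vm_fd, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot         = 0,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
```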
1021 | long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |
1022 | { | |
1023 | long r; | |
1024 | ||
1025 | switch (ioctl) { | |
1026 | default: | |
ed829857 | 1027 | r = -ENOIOCTLCMD; |
669e846e SL |
1028 | } |
1029 | ||
1030 | return r; | |
1031 | } | |
1032 | ||
1033 | int kvm_arch_init(void *opaque) | |
1034 | { | |
669e846e SL |
1035 | if (kvm_mips_callbacks) { |
1036 | kvm_err("kvm: module already exists\n"); | |
1037 | return -EEXIST; | |
1038 | } | |
1039 | ||
d98403a5 | 1040 | return kvm_mips_emulation_init(&kvm_mips_callbacks); |
669e846e SL |
1041 | } |
1042 | ||
1043 | void kvm_arch_exit(void) | |
1044 | { | |
1045 | kvm_mips_callbacks = NULL; | |
1046 | } | |
1047 | ||
d116e812 DCZ |
1048 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1049 | struct kvm_sregs *sregs) | |
669e846e | 1050 | { |
ed829857 | 1051 | return -ENOIOCTLCMD; |
669e846e SL |
1052 | } |
1053 | ||
d116e812 DCZ |
1054 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
1055 | struct kvm_sregs *sregs) | |
669e846e | 1056 | { |
ed829857 | 1057 | return -ENOIOCTLCMD; |
669e846e SL |
1058 | } |
1059 | ||
31928aa5 | 1060 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
669e846e | 1061 | { |
669e846e SL |
1062 | } |
1063 | ||
1064 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1065 | { | |
ed829857 | 1066 | return -ENOIOCTLCMD; |
669e846e SL |
1067 | } |
1068 | ||
1069 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1070 | { | |
ed829857 | 1071 | return -ENOIOCTLCMD; |
669e846e SL |
1072 | } |
1073 | ||
1074 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | |
1075 | { | |
1076 | return VM_FAULT_SIGBUS; | |
1077 | } | |
1078 | ||
784aa3d7 | 1079 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
669e846e SL |
1080 | { |
1081 | int r; | |
1082 | ||
1083 | switch (ext) { | |
4c73fb2b | 1084 | case KVM_CAP_ONE_REG: |
5fafd874 | 1085 | case KVM_CAP_ENABLE_CAP: |
4c73fb2b DD |
1086 | r = 1; |
1087 | break; | |
669e846e SL |
1088 | case KVM_CAP_COALESCED_MMIO: |
1089 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | |
1090 | break; | |
5fafd874 | 1091 | case KVM_CAP_MIPS_FPU: |
556f2a52 JH |
1092 | /* We don't handle systems with inconsistent cpu_has_fpu */ |
1093 | r = !!raw_cpu_has_fpu; | |
5fafd874 | 1094 | break; |
d952bd07 JH |
1095 | case KVM_CAP_MIPS_MSA: |
1096 | /* | |
1097 | * We don't support MSA vector partitioning yet: | |
1098 | * 1) It would require explicit support which can't be tested | |
1099 | * yet due to lack of support in current hardware. | |
1100 | * 2) It extends the state that would need to be saved/restored | |
1101 | * by e.g. QEMU for migration. | |
1102 | * | |
1103 | * When vector partitioning hardware becomes available, support | |
1104 | * could be added by requiring a flag when enabling | |
1105 | * KVM_CAP_MIPS_MSA capability to indicate that userland knows | |
1106 | * to save/restore the appropriate extra state. | |
1107 | */ | |
1108 | r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); | |
1109 | break; | |
669e846e SL |
1110 | default: |
1111 | r = 0; | |
1112 | break; | |
1113 | } | |
1114 | return r; | |
669e846e SL |
1115 | } |
1116 | ||
1117 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |
1118 | { | |
1119 | return kvm_mips_pending_timer(vcpu); | |
1120 | } | |
1121 | ||
1122 | int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) | |
1123 | { | |
1124 | int i; | |
1125 | struct mips_coproc *cop0; | |
1126 | ||
1127 | if (!vcpu) | |
1128 | return -1; | |
1129 | ||
6ad78a5c DCZ |
1130 | kvm_debug("VCPU Register Dump:\n"); |
1131 | kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); | |
1132 | kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); | |
669e846e SL |
1133 | |
1134 | for (i = 0; i < 32; i += 4) { | |
6ad78a5c | 1135 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, |
669e846e SL |
1136 | vcpu->arch.gprs[i], |
1137 | vcpu->arch.gprs[i + 1], | |
1138 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); | |
1139 | } | |
6ad78a5c DCZ |
1140 | kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); |
1141 | kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); | |
669e846e SL |
1142 | |
1143 | cop0 = vcpu->arch.cop0; | |
6ad78a5c DCZ |
1144 | kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", |
1145 | kvm_read_c0_guest_status(cop0), | |
1146 | kvm_read_c0_guest_cause(cop0)); | |
669e846e | 1147 | |
6ad78a5c | 1148 | kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); |
669e846e SL |
1149 | |
1150 | return 0; | |
1151 | } | |
1152 | ||
1153 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |
1154 | { | |
1155 | int i; | |
1156 | ||
8d17dd04 | 1157 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
bf32ebf6 | 1158 | vcpu->arch.gprs[i] = regs->gpr[i]; |
8d17dd04 | 1159 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ |
669e846e SL |
1160 | vcpu->arch.hi = regs->hi; |
1161 | vcpu->arch.lo = regs->lo; | |
1162 | vcpu->arch.pc = regs->pc; | |
1163 | ||
4c73fb2b | 1164 | return 0; |
669e846e SL |
1165 | } |
1166 | ||
1167 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |
1168 | { | |
1169 | int i; | |
1170 | ||
8d17dd04 | 1171 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
bf32ebf6 | 1172 | regs->gpr[i] = vcpu->arch.gprs[i]; |
669e846e SL |
1173 | |
1174 | regs->hi = vcpu->arch.hi; | |
1175 | regs->lo = vcpu->arch.lo; | |
1176 | regs->pc = vcpu->arch.pc; | |
1177 | ||
4c73fb2b | 1178 | return 0; |
669e846e SL |
1179 | } |
1180 | ||
0fae34f4 | 1181 | static void kvm_mips_comparecount_func(unsigned long data) |
669e846e SL |
1182 | { |
1183 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | |
1184 | ||
1185 | kvm_mips_callbacks->queue_timer_int(vcpu); | |
1186 | ||
1187 | vcpu->arch.wait = 0; | |
8577370f MT |
1188 | if (swait_active(&vcpu->wq)) |
1189 | swake_up(&vcpu->wq); | |
669e846e SL |
1190 | } |
1191 | ||
d116e812 | 1192 | /* low level hrtimer wake routine */ |
0fae34f4 | 1193 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) |
669e846e SL |
1194 | { |
1195 | struct kvm_vcpu *vcpu; | |
1196 | ||
1197 | vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); | |
1198 | kvm_mips_comparecount_func((unsigned long) vcpu); | |
e30492bb | 1199 | return kvm_mips_count_timeout(vcpu); |
669e846e SL |
1200 | } |
1201 | ||
1202 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |
1203 | { | |
1204 | kvm_mips_callbacks->vcpu_init(vcpu); | |
1205 | hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, | |
1206 | HRTIMER_MODE_REL); | |
1207 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; | |
669e846e SL |
1208 | return 0; |
1209 | } | |
1210 | ||
d116e812 DCZ |
1211 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1212 | struct kvm_translation *tr) | |
669e846e SL |
1213 | { |
1214 | return 0; | |
1215 | } | |
1216 | ||
1217 | /* Initial guest state */ | |
1218 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |
1219 | { | |
1220 | return kvm_mips_callbacks->vcpu_setup(vcpu); | |
1221 | } | |
1222 | ||
d116e812 | 1223 | static void kvm_mips_set_c0_status(void) |
669e846e SL |
1224 | { |
1225 | uint32_t status = read_c0_status(); | |
1226 | ||
669e846e SL |
1227 | if (cpu_has_dsp) |
1228 | status |= (ST0_MX); | |
1229 | ||
1230 | write_c0_status(status); | |
1231 | ehb(); | |
1232 | } | |
1233 | ||
1234 | /* | |
1235 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) | |
1236 | */ | |
1237 | int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |
1238 | { | |
1239 | uint32_t cause = vcpu->arch.host_cp0_cause; | |
1240 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
1241 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
1242 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
1243 | enum emulation_result er = EMULATE_DONE; | |
1244 | int ret = RESUME_GUEST; | |
1245 | ||
c4c6f2ca JH |
1246 | /* re-enable HTW before enabling interrupts */ |
1247 | htw_start(); | |
1248 | ||
669e846e SL |
1249 | /* Set a default exit reason */ |
1250 | run->exit_reason = KVM_EXIT_UNKNOWN; | |
1251 | run->ready_for_interrupt_injection = 1; | |
1252 | ||
d116e812 DCZ |
1253 | /* |
1254 | * Set the appropriate status bits based on host CPU features, | |
1255 | * before we hit the scheduler | |
1256 | */ | |
669e846e SL |
1257 | kvm_mips_set_c0_status(); |
1258 | ||
1259 | local_irq_enable(); | |
1260 | ||
1261 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", | |
1262 | cause, opc, run, vcpu); | |
1263 | ||
d116e812 DCZ |
1264 | /* |
1265 | * Do a privilege check; if in UM, most of these exit conditions end up |
669e846e SL |
1266 | * causing an exception to be delivered to the Guest Kernel | |
1267 | */ | |
1268 | er = kvm_mips_check_privilege(cause, opc, run, vcpu); | |
1269 | if (er == EMULATE_PRIV_FAIL) { | |
1270 | goto skip_emul; | |
1271 | } else if (er == EMULATE_FAIL) { | |
1272 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1273 | ret = RESUME_HOST; | |
1274 | goto skip_emul; | |
1275 | } | |
1276 | ||
1277 | switch (exccode) { | |
16d100db JH |
1278 | case EXCCODE_INT: |
1279 | kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); | |
669e846e SL |
1280 | |
1281 | ++vcpu->stat.int_exits; | |
1282 | trace_kvm_exit(vcpu, INT_EXITS); | |
1283 | ||
d116e812 | 1284 | if (need_resched()) |
669e846e | 1285 | cond_resched(); |
669e846e SL |
1286 | |
1287 | ret = RESUME_GUEST; | |
1288 | break; | |
1289 | ||
16d100db JH |
1290 | case EXCCODE_CPU: |
1291 | kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); | |
669e846e SL |
1292 | |
1293 | ++vcpu->stat.cop_unusable_exits; | |
1294 | trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); | |
1295 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); | |
1296 | /* XXXKYMA: Might need to return to user space */ | |
d116e812 | 1297 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) |
669e846e | 1298 | ret = RESUME_HOST; |
669e846e SL |
1299 | break; |
1300 | ||
16d100db | 1301 | case EXCCODE_MOD: |
669e846e SL |
1302 | ++vcpu->stat.tlbmod_exits; |
1303 | trace_kvm_exit(vcpu, TLBMOD_EXITS); | |
1304 | ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); | |
1305 | break; | |
1306 | ||
16d100db | 1307 | case EXCCODE_TLBS: |
d116e812 DCZ |
1308 | kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", |
1309 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, | |
1310 | badvaddr); | |
669e846e SL |
1311 | |
1312 | ++vcpu->stat.tlbmiss_st_exits; | |
1313 | trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); | |
1314 | ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); | |
1315 | break; | |
1316 | ||
16d100db | 1317 | case EXCCODE_TLBL: |
669e846e SL |
1318 | kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", |
1319 | cause, opc, badvaddr); | |
1320 | ||
1321 | ++vcpu->stat.tlbmiss_ld_exits; | |
1322 | trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); | |
1323 | ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); | |
1324 | break; | |
1325 | ||
16d100db | 1326 | case EXCCODE_ADES: |
669e846e SL |
1327 | ++vcpu->stat.addrerr_st_exits; |
1328 | trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); | |
1329 | ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); | |
1330 | break; | |
1331 | ||
16d100db | 1332 | case EXCCODE_ADEL: |
669e846e SL |
1333 | ++vcpu->stat.addrerr_ld_exits; |
1334 | trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); | |
1335 | ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); | |
1336 | break; | |
1337 | ||
16d100db | 1338 | case EXCCODE_SYS: |
669e846e SL |
1339 | ++vcpu->stat.syscall_exits; |
1340 | trace_kvm_exit(vcpu, SYSCALL_EXITS); | |
1341 | ret = kvm_mips_callbacks->handle_syscall(vcpu); | |
1342 | break; | |
1343 | ||
16d100db | 1344 | case EXCCODE_RI: |
669e846e SL |
1345 | ++vcpu->stat.resvd_inst_exits; |
1346 | trace_kvm_exit(vcpu, RESVD_INST_EXITS); | |
1347 | ret = kvm_mips_callbacks->handle_res_inst(vcpu); | |
1348 | break; | |
1349 | ||
16d100db | 1350 | case EXCCODE_BP: |
669e846e SL |
1351 | ++vcpu->stat.break_inst_exits; |
1352 | trace_kvm_exit(vcpu, BREAK_INST_EXITS); | |
1353 | ret = kvm_mips_callbacks->handle_break(vcpu); | |
1354 | break; | |
1355 | ||
16d100db | 1356 | case EXCCODE_TR: |
0a560427 JH |
1357 | ++vcpu->stat.trap_inst_exits; |
1358 | trace_kvm_exit(vcpu, TRAP_INST_EXITS); | |
1359 | ret = kvm_mips_callbacks->handle_trap(vcpu); | |
1360 | break; | |
1361 | ||
16d100db | 1362 | case EXCCODE_MSAFPE: |
c2537ed9 JH |
1363 | ++vcpu->stat.msa_fpe_exits; |
1364 | trace_kvm_exit(vcpu, MSA_FPE_EXITS); | |
1365 | ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); | |
1366 | break; | |
1367 | ||
16d100db | 1368 | case EXCCODE_FPE: |
1c0cd66a JH |
1369 | ++vcpu->stat.fpe_exits; |
1370 | trace_kvm_exit(vcpu, FPE_EXITS); | |
1371 | ret = kvm_mips_callbacks->handle_fpe(vcpu); | |
1372 | break; | |
1373 | ||
16d100db | 1374 | case EXCCODE_MSADIS: |
c2537ed9 JH |
1375 | ++vcpu->stat.msa_disabled_exits; |
1376 | trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); | |
98119ad5 JH |
1377 | ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); |
1378 | break; | |
1379 | ||
669e846e | 1380 | default: |
d116e812 DCZ |
1381 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1382 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, | |
1383 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | |
669e846e SL |
1384 | kvm_arch_vcpu_dump_regs(vcpu); |
1385 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1386 | ret = RESUME_HOST; | |
1387 | break; | |
1388 | ||
1389 | } | |
1390 | ||
1391 | skip_emul: | |
1392 | local_irq_disable(); | |
1393 | ||
1394 | if (er == EMULATE_DONE && !(ret & RESUME_HOST)) | |
1395 | kvm_mips_deliver_interrupts(vcpu, cause); | |
1396 | ||
1397 | if (!(ret & RESUME_HOST)) { | |
d116e812 | 1398 | /* Only check for signals if not already exiting to userspace */ |
669e846e SL |
1399 | if (signal_pending(current)) { |
1400 | run->exit_reason = KVM_EXIT_INTR; | |
1401 | ret = (-EINTR << 2) | RESUME_HOST; | |
1402 | ++vcpu->stat.signal_exits; | |
1403 | trace_kvm_exit(vcpu, SIGNAL_EXITS); | |
1404 | } | |
1405 | } | |
1406 | ||
98e91b84 JH |
1407 | if (ret == RESUME_GUEST) { |
1408 | /* | |
539cb89f JH |
1409 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context |
1410 | * is live), restore FCR31 / MSACSR. | |
98e91b84 JH |
1411 | * |
1412 | * This should be before returning to the guest exception | |
539cb89f JH |
1413 | * vector, as it may well cause an [MSA] FP exception if there |
1414 | * are pending exception bits unmasked. (see | |
98e91b84 JH |
1415 | * kvm_mips_csr_die_notifier() for how that is handled). |
1416 | */ | |
1417 | if (kvm_mips_guest_has_fpu(&vcpu->arch) && | |
1418 | read_c0_status() & ST0_CU1) | |
1419 | __kvm_restore_fcsr(&vcpu->arch); | |
539cb89f JH |
1420 | |
1421 | if (kvm_mips_guest_has_msa(&vcpu->arch) && | |
1422 | read_c0_config5() & MIPS_CONF5_MSAEN) | |
1423 | __kvm_restore_msacsr(&vcpu->arch); | |
98e91b84 JH |
1424 | } |
1425 | ||
c4c6f2ca JH |
1426 | /* Disable HTW before returning to guest or host */ |
1427 | htw_stop(); | |
1428 | ||
669e846e SL |
1429 | return ret; |
1430 | } | |
1431 | ||
98e91b84 JH |
1432 | /* Enable FPU for guest and restore context */ |
1433 | void kvm_own_fpu(struct kvm_vcpu *vcpu) | |
1434 | { | |
1435 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1436 | unsigned int sr, cfg5; | |
1437 | ||
1438 | preempt_disable(); | |
1439 | ||
539cb89f JH |
1440 | sr = kvm_read_c0_guest_status(cop0); |
1441 | ||
1442 | /* | |
1443 | * If MSA state is already live, it is undefined how it interacts with | |
1444 | * FR=0 FPU state, and we don't want to hit reserved instruction | |
1445 | * exceptions trying to save the MSA state later when CU=1 && FR=1, so | |
1446 | * play it safe and save it first. | |
1447 | * | |
1448 | * In theory we shouldn't ever hit this case since kvm_lose_fpu() should | |
1449 | * get called when guest CU1 is set; however, we can't trust the guest | |
1450 | * not to clobber the status register directly via the commpage. | |
1451 | */ | |
1452 | if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && | |
1453 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | |
1454 | kvm_lose_fpu(vcpu); | |
1455 | ||
98e91b84 JH |
1456 | /* |
1457 | * Enable FPU for guest | |
1458 | * We set FR and FRE according to guest context | |
1459 | */ | |
98e91b84 JH |
1460 | change_c0_status(ST0_CU1 | ST0_FR, sr); |
1461 | if (cpu_has_fre) { | |
1462 | cfg5 = kvm_read_c0_guest_config5(cop0); | |
1463 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | |
1464 | } | |
1465 | enable_fpu_hazard(); | |
1466 | ||
1467 | /* If guest FPU state not active, restore it now */ | |
1468 | if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { | |
1469 | __kvm_restore_fpu(&vcpu->arch); | |
1470 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; | |
1471 | } | |
1472 | ||
1473 | preempt_enable(); | |
1474 | } | |
1475 | ||
539cb89f JH |
1476 | #ifdef CONFIG_CPU_HAS_MSA |
1477 | /* Enable MSA for guest and restore context */ | |
1478 | void kvm_own_msa(struct kvm_vcpu *vcpu) | |
1479 | { | |
1480 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1481 | unsigned int sr, cfg5; | |
1482 | ||
1483 | preempt_disable(); | |
1484 | ||
1485 | /* | |
1486 | * Enable FPU if enabled in guest, since we're restoring FPU context | |
1487 | * anyway. We set FR and FRE according to guest context. | |
1488 | */ | |
1489 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | |
1490 | sr = kvm_read_c0_guest_status(cop0); | |
1491 | ||
1492 | /* | |
1493 | * If FR=0 FPU state is already live, it is undefined how it | |
1494 | * interacts with MSA state, so play it safe and save it first. | |
1495 | */ | |
1496 | if (!(sr & ST0_FR) && | |
1497 | (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | | |
1498 | KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) | |
1499 | kvm_lose_fpu(vcpu); | |
1500 | ||
1501 | change_c0_status(ST0_CU1 | ST0_FR, sr); | |
1502 | if (sr & ST0_CU1 && cpu_has_fre) { | |
1503 | cfg5 = kvm_read_c0_guest_config5(cop0); | |
1504 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | |
1505 | } | |
1506 | } | |
1507 | ||
1508 | /* Enable MSA for guest */ | |
1509 | set_c0_config5(MIPS_CONF5_MSAEN); | |
1510 | enable_fpu_hazard(); | |
1511 | ||
1512 | switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { | |
1513 | case KVM_MIPS_FPU_FPU: | |
1514 | /* | |
1515 | * Guest FPU state already loaded, only restore upper MSA state | |
1516 | */ | |
1517 | __kvm_restore_msa_upper(&vcpu->arch); | |
1518 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; | |
1519 | break; | |
1520 | case 0: | |
1521 | /* Neither FPU or MSA already active, restore full MSA state */ | |
1522 | __kvm_restore_msa(&vcpu->arch); | |
1523 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; | |
1524 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) | |
1525 | vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; | |
1526 | break; | |
1527 | default: | |
1528 | break; | |
1529 | } | |
1530 | ||
1531 | preempt_enable(); | |
1532 | } | |
1533 | #endif | |
1534 | ||
1535 | /* Drop FPU & MSA without saving it */ | |
98e91b84 JH |
1536 | void kvm_drop_fpu(struct kvm_vcpu *vcpu) |
1537 | { | |
1538 | preempt_disable(); | |
539cb89f JH |
1539 | if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { |
1540 | disable_msa(); | |
1541 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; | |
1542 | } | |
98e91b84 JH |
1543 | if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { |
1544 | clear_c0_status(ST0_CU1 | ST0_FR); | |
1545 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; | |
1546 | } | |
1547 | preempt_enable(); | |
1548 | } | |
1549 | ||
539cb89f | 1550 | /* Save and disable FPU & MSA */ |
98e91b84 JH |
1551 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) |
1552 | { | |
1553 | /* | |
539cb89f JH |
1554 | * FPU & MSA get disabled in root context (hardware) when it is disabled |
1555 | * in guest context (software), but the register state in the hardware | |
1556 | * may still be in use. This is why we explicitly re-enable the hardware | |
98e91b84 JH |
1557 | * before saving. |
1558 | */ | |
1559 | ||
1560 | preempt_disable(); | |
539cb89f JH |
1561 | if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { |
1562 | set_c0_config5(MIPS_CONF5_MSAEN); | |
1563 | enable_fpu_hazard(); | |
1564 | ||
1565 | __kvm_save_msa(&vcpu->arch); | |
1566 | ||
1567 | /* Disable MSA & FPU */ | |
1568 | disable_msa(); | |
4ac33429 | 1569 | if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { |
539cb89f | 1570 | clear_c0_status(ST0_CU1 | ST0_FR); |
4ac33429 JH |
1571 | disable_fpu_hazard(); |
1572 | } | |
539cb89f JH |
1573 | vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); |
1574 | } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { | |
98e91b84 JH |
1575 | set_c0_status(ST0_CU1); |
1576 | enable_fpu_hazard(); | |
1577 | ||
1578 | __kvm_save_fpu(&vcpu->arch); | |
1579 | vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; | |
1580 | ||
1581 | /* Disable FPU */ | |
1582 | clear_c0_status(ST0_CU1 | ST0_FR); | |
4ac33429 | 1583 | disable_fpu_hazard(); |
98e91b84 JH |
1584 | } |
1585 | preempt_enable(); | |
1586 | } | |
1587 | ||
1588 | /* | |
539cb89f JH |
1589 | * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are |
1590 | * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP | |
1591 | * exception if cause bits are set in the value being written. | |
98e91b84 JH |
1592 | */ |
1593 | static int kvm_mips_csr_die_notify(struct notifier_block *self, | |
1594 | unsigned long cmd, void *ptr) | |
1595 | { | |
1596 | struct die_args *args = (struct die_args *)ptr; | |
1597 | struct pt_regs *regs = args->regs; | |
1598 | unsigned long pc; | |
1599 | ||
539cb89f JH |
1600 | /* Only interested in FPE and MSAFPE */ |
1601 | if (cmd != DIE_FP && cmd != DIE_MSAFP) | |
98e91b84 JH |
1602 | return NOTIFY_DONE; |
1603 | ||
1604 | /* Return immediately if guest context isn't active */ | |
1605 | if (!(current->flags & PF_VCPU)) | |
1606 | return NOTIFY_DONE; | |
1607 | ||
1608 | /* Should never get here from user mode */ | |
1609 | BUG_ON(user_mode(regs)); | |
1610 | ||
1611 | pc = instruction_pointer(regs); | |
1612 | switch (cmd) { | |
1613 | case DIE_FP: | |
1614 | /* match 2nd instruction in __kvm_restore_fcsr */ | |
1615 | if (pc != (unsigned long)&__kvm_restore_fcsr + 4) | |
1616 | return NOTIFY_DONE; | |
1617 | break; | |
539cb89f JH |
1618 | case DIE_MSAFP: |
1619 | /* match 2nd/3rd instruction in __kvm_restore_msacsr */ | |
1620 | if (!cpu_has_msa || | |
1621 | pc < (unsigned long)&__kvm_restore_msacsr + 4 || | |
1622 | pc > (unsigned long)&__kvm_restore_msacsr + 8) | |
1623 | return NOTIFY_DONE; | |
1624 | break; | |
98e91b84 JH |
1625 | } |
1626 | ||
1627 | /* Move PC forward a little and continue executing */ | |
1628 | instruction_pointer(regs) += 4; | |
1629 | ||
1630 | return NOTIFY_STOP; | |
1631 | } | |
1632 | ||
1633 | static struct notifier_block kvm_mips_csr_die_notifier = { | |
1634 | .notifier_call = kvm_mips_csr_die_notify, | |
1635 | }; | |
1636 | ||
2db9d233 | 1637 | static int __init kvm_mips_init(void) |
669e846e SL |
1638 | { |
1639 | int ret; | |
1640 | ||
1641 | ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | |
1642 | ||
1643 | if (ret) | |
1644 | return ret; | |
1645 | ||
98e91b84 JH |
1646 | register_die_notifier(&kvm_mips_csr_die_notifier); |
1647 | ||
d116e812 DCZ |
1648 | /* |
1649 | * On MIPS, kernel modules are executed from "mapped space", which | |
1650 | * requires TLBs. The TLB handling code is statically linked with | |
d7d5b05f | 1651 | * the rest of the kernel (tlb.c) to avoid the possibility of |
d116e812 DCZ |
1652 | * double faulting. The issue is that the TLB code references |
1653 | * routines that are part of the KVM module, which are only | |
1654 | * available once the module is loaded. | |
669e846e SL |
1655 | */ |
1656 | kvm_mips_gfn_to_pfn = gfn_to_pfn; | |
1657 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; | |
1658 | kvm_mips_is_error_pfn = is_error_pfn; | |
1659 | ||
669e846e SL |
1660 | return 0; |
1661 | } | |
1662 | ||
2db9d233 | 1663 | static void __exit kvm_mips_exit(void) |
669e846e SL |
1664 | { |
1665 | kvm_exit(); | |
1666 | ||
1667 | kvm_mips_gfn_to_pfn = NULL; | |
1668 | kvm_mips_release_pfn_clean = NULL; | |
1669 | kvm_mips_is_error_pfn = NULL; | |
98e91b84 JH |
1670 | |
1671 | unregister_die_notifier(&kvm_mips_csr_die_notifier); | |
669e846e SL |
1672 | } |
1673 | ||
1674 | module_init(kvm_mips_init); | |
1675 | module_exit(kvm_mips_exit); | |
1676 | ||
1677 | EXPORT_TRACEPOINT_SYMBOL(kvm_exit); |