/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, wait_exits),
	STATS_DESC_COUNTER(VCPU, cache_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
	STATS_DESC_COUNTER(VCPU, break_inst_exits),
	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
	STATS_DESC_COUNTER(VCPU, fpe_exits),
	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	return kvm_mips_callbacks->enable_virtualization_cpu();
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_mips_callbacks->disable_virtualization_cpu();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

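	/*
	 * Layout built above, relative to gebase: TLB refill at +0x000
	 * (+0x080 on 64-bit), the general exception entry at +0x180,
	 * vectored interrupt entries from +0x200 at VECTORSPACING
	 * intervals, and the common exit handler followed by the guest
	 * entry routine from +0x2000 onwards.
	 */
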
	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_mips_callbacks->vcpu_run(vcpu);
	guest_state_exit_irqoff();

	return ret;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (!vcpu->wants_to_run)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_timing_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_vcpu_enter_exit(vcpu);

	/*
	 * We must ensure that any pending interrupts are taken before
	 * we exit guest timing so that timer ticks are accounted as
	 * guest time. Transiently unmask interrupts so that any
	 * pending interrupts are taken.
	 *
	 * TODO: is there a barrier which ensures that pending interrupts are
	 * recognised? Currently this just hopes that the CPU takes any pending
	 * interrupts between the enable and disable.
	 */
	local_irq_enable();
	local_irq_disable();

	trace_kvm_out(vcpu);
	guest_timing_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

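/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG accessors: the size field of reg->id
 * (KVM_REG_SIZE_MASK) selects a 32-bit, 64-bit or 128-bit transfer, and
 * reg->addr is a userspace pointer to the value being read or written.
 */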
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}

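/*
 * Illustrative userspace use of the ONE_REG interface handled above (a
 * sketch, not part of this file), reading the guest PC through the vCPU fd:
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
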
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	return 1;
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	int r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = &vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(&vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}

int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_exit_irqoff();
	ret = __kvm_mips_handle_exit(vcpu);
	guest_state_enter_irqoff();

	return ret;
}

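/*
 * vcpu->arch.aux_inuse tracks which guest FPU/MSA register context is
 * currently live in hardware: kvm_own_fpu()/kvm_own_msa() below load it and
 * set KVM_MIPS_AUX_FPU/KVM_MIPS_AUX_MSA, kvm_lose_fpu() saves it back, and
 * kvm_drop_fpu() discards it without saving.
 */
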
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IO_2] = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;

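/*
 * Invert the kvm_priority_to_irq[] mapping: return the priority whose Cause
 * IP bit (1 << (irq + 8)) matches the given IRQ number, or MIPS_EXC_MAX if
 * there is none.
 */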
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_mips_emulation_init();
	if (ret)
		return ret;


	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret) {
		unregister_die_notifier(&kvm_mips_csr_die_notifier);
		return ret;
	}
	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);