mips/kvm: Fix ABI by moving manipulation of CP0 registers to KVM_{G,S}ET_ONE_REG
[linux-2.6-block.git] / arch / mips / kvm / kvm_mips.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * KVM/MIPS: MIPS specific KVM APIs
7  *
8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
11
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16 #include <linux/fs.h>
17 #include <linux/bootmem.h>
18 #include <asm/page.h>
19 #include <asm/cacheflush.h>
20 #include <asm/mmu_context.h>
21
22 #include <linux/kvm_host.h>
23
24 #include "kvm_mips_int.h"
25 #include "kvm_mips_comm.h"
26
27 #define CREATE_TRACE_POINTS
28 #include "trace.h"
29
30 #ifndef VECTORSPACING
31 #define VECTORSPACING 0x100     /* for EI/VI mode */
32 #endif
33
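/* Per-vcpu exit counters, exported through the generic KVM debugfs interface. */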
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35 struct kvm_stats_debugfs_item debugfs_entries[] = {
36         { "wait", VCPU_STAT(wait_exits) },
37         { "cache", VCPU_STAT(cache_exits) },
38         { "signal", VCPU_STAT(signal_exits) },
39         { "interrupt", VCPU_STAT(int_exits) },
40         { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
41         { "tlbmod", VCPU_STAT(tlbmod_exits) },
42         { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43         { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44         { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45         { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46         { "syscall", VCPU_STAT(syscall_exits) },
47         { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48         { "break_inst", VCPU_STAT(break_inst_exits) },
49         { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51         {NULL}
52 };
53
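/*
 * Clear the per-cpu guest ASIDs; used by the KVM_NMI ioctl below to treat an
 * NMI as a CPU reset.
 */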
54 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55 {
56         int i;
57         for_each_possible_cpu(i) {
58                 vcpu->arch.guest_kernel_asid[i] = 0;
59                 vcpu->arch.guest_user_asid[i] = 0;
60         }
61         return 0;
62 }
63
64 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65 {
66         return gfn;
67 }
68
69 /* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
70  * are "runnable" if interrupts are pending
71  */
72 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73 {
74         return !!(vcpu->arch.pending_exceptions);
75 }
76
77 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78 {
79         return 1;
80 }
81
82 int kvm_arch_hardware_enable(void *garbage)
83 {
84         return 0;
85 }
86
87 void kvm_arch_hardware_disable(void *garbage)
88 {
89 }
90
91 int kvm_arch_hardware_setup(void)
92 {
93         return 0;
94 }
95
96 void kvm_arch_hardware_unsetup(void)
97 {
98 }
99
100 void kvm_arch_check_processor_compat(void *rtn)
101 {
102         int *r = (int *)rtn;
103         *r = 0;
104         return;
105 }
106
107 static void kvm_mips_init_tlbs(struct kvm *kvm)
108 {
109         unsigned long wired;
110
111         /* Add a wired entry to the TLB; it is used to map the commpage to the guest kernel */
112         wired = read_c0_wired();
113         write_c0_wired(wired + 1);
114         mtc0_tlbw_hazard();
115         kvm->arch.commpage_tlb = wired;
116
117         kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118                   kvm->arch.commpage_tlb);
119 }
120
121 static void kvm_mips_init_vm_percpu(void *arg)
122 {
123         struct kvm *kvm = (struct kvm *)arg;
124
125         kvm_mips_init_tlbs(kvm);
126         kvm_mips_callbacks->vm_init(kvm);
127
128 }
129
130 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131 {
132         if (atomic_inc_return(&kvm_mips_instance) == 1) {
133                 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134                          __func__);
135                 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136         }
137
138
139         return 0;
140 }
141
142 void kvm_mips_free_vcpus(struct kvm *kvm)
143 {
144         unsigned int i;
145         struct kvm_vcpu *vcpu;
146
147         /* Put the pages we reserved for the guest pmap */
148         for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149                 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150                         kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151         }
152
153         if (kvm->arch.guest_pmap)
154                 kfree(kvm->arch.guest_pmap);
155
156         kvm_for_each_vcpu(i, vcpu, kvm) {
157                 kvm_arch_vcpu_free(vcpu);
158         }
159
160         mutex_lock(&kvm->lock);
161
162         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163                 kvm->vcpus[i] = NULL;
164
165         atomic_set(&kvm->online_vcpus, 0);
166
167         mutex_unlock(&kvm->lock);
168 }
169
170 void kvm_arch_sync_events(struct kvm *kvm)
171 {
172 }
173
174 static void kvm_mips_uninit_tlbs(void *arg)
175 {
176         /* Restore wired count */
177         write_c0_wired(0);
178         mtc0_tlbw_hazard();
179         /* Clear out all the TLBs */
180         kvm_local_flush_tlb_all();
181 }
182
183 void kvm_arch_destroy_vm(struct kvm *kvm)
184 {
185         kvm_mips_free_vcpus(kvm);
186
187         /* If this is the last instance, restore wired count */
188         if (atomic_dec_return(&kvm_mips_instance) == 0) {
189                 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190                          __func__);
191                 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192         }
193 }
194
195 long
196 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197 {
198         return -EINVAL;
199 }
200
201 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202                            struct kvm_memory_slot *dont)
203 {
204 }
205
206 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207 {
208         return 0;
209 }
210
211 int kvm_arch_prepare_memory_region(struct kvm *kvm,
212                                 struct kvm_memory_slot *memslot,
213                                 struct kvm_userspace_memory_region *mem,
214                                 enum kvm_mr_change change)
215 {
216         return 0;
217 }
218
219 void kvm_arch_commit_memory_region(struct kvm *kvm,
220                                 struct kvm_userspace_memory_region *mem,
221                                 const struct kvm_memory_slot *old,
222                                 enum kvm_mr_change change)
223 {
224         unsigned long npages = 0;
225         int i, err = 0;
226
227         kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228                   __func__, kvm, mem->slot, mem->guest_phys_addr,
229                   mem->memory_size, mem->userspace_addr);
230
231         /* Set up the guest pmap table: one host pfn entry per page of guest memslot 0 */
232         if (!kvm->arch.guest_pmap) {
233                 if (mem->slot == 0)
234                         npages = mem->memory_size >> PAGE_SHIFT;
235
236                 if (npages) {
237                         kvm->arch.guest_pmap_npages = npages;
238                         kvm->arch.guest_pmap =
239                             kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241                         if (!kvm->arch.guest_pmap) {
242                                 kvm_err("Failed to allocate guest PMAP");
243                                 err = -ENOMEM;
244                                 goto out;
245                         }
246
247                         kvm_info
248                             ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249                              npages, kvm->arch.guest_pmap);
250
251                         /* Now setup the page table */
252                         for (i = 0; i < npages; i++) {
253                                 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254                         }
255                 }
256         }
257 out:
258         return;
259 }
260
261 void kvm_arch_flush_shadow_all(struct kvm *kvm)
262 {
263 }
264
265 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266                                    struct kvm_memory_slot *slot)
267 {
268 }
269
270 void kvm_arch_flush_shadow(struct kvm *kvm)
271 {
272 }
273
274 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275 {
276         extern char mips32_exception[], mips32_exceptionEnd[];
277         extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278         int err, size, offset;
279         void *gebase;
280         int i;
281
282         struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284         if (!vcpu) {
285                 err = -ENOMEM;
286                 goto out;
287         }
288
289         err = kvm_vcpu_init(vcpu, kvm, id);
290
291         if (err)
292                 goto out_free_cpu;
293
294         kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296         /* Allocate space for host mode exception handlers that handle
297          * guest mode exits
298          */
299         if (cpu_has_veic || cpu_has_vint) {
300                 size = 0x200 + VECTORSPACING * 64;
301         } else {
302                 size = 0x200;
303         }
304
305         /* Save Linux EBASE */
306         vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308         gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310         if (!gebase) {
311                 err = -ENOMEM;
312                 goto out_free_cpu;
313         }
314         kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315                  ALIGN(size, PAGE_SIZE), gebase);
316
317         /* Save new ebase */
318         vcpu->arch.guest_ebase = gebase;
319
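        /*
         * Layout established by the copies below: the TLB refill handler sits
         * at gebase, the general exception handler at gebase + 0x180, vectored
         * interrupt handlers at gebase + 0x200 + i * VECTORSPACING, and the
         * KVM guest-exit handler (mips32_GuestException) at gebase + 0x2000.
         */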
320         /* Copy L1 Guest Exception handler to correct offset */
321
322         /* TLB Refill, EXL = 0 */
323         memcpy(gebase, mips32_exception,
324                mips32_exceptionEnd - mips32_exception);
325
326         /* General Exception Entry point */
327         memcpy(gebase + 0x180, mips32_exception,
328                mips32_exceptionEnd - mips32_exception);
329
330         /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331         for (i = 0; i < 8; i++) {
332                 kvm_debug("L1 Vectored handler @ %p\n",
333                           gebase + 0x200 + (i * VECTORSPACING));
334                 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335                        mips32_exceptionEnd - mips32_exception);
336         }
337
338         /* General handler, relocate to unmapped space for sanity's sake */
339         offset = 0x2000;
340         kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341                  gebase + offset,
342                  mips32_GuestExceptionEnd - mips32_GuestException);
343
344         memcpy(gebase + offset, mips32_GuestException,
345                mips32_GuestExceptionEnd - mips32_GuestException);
346
347         /* Invalidate the icache for these ranges */
348         mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350         /* Allocate comm page for guest kernel; a wired TLB entry will be reserved to map GVA 0xFFFF8000 to this page */
351         vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353         if (!vcpu->arch.kseg0_commpage) {
354                 err = -ENOMEM;
355                 goto out_free_gebase;
356         }
357
358         kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359         kvm_mips_commpage_init(vcpu);
360
361         /* Init */
362         vcpu->arch.last_sched_cpu = -1;
363
364         /* Start off the timer */
365         kvm_mips_emulate_count(vcpu);
366
367         return vcpu;
368
369 out_free_gebase:
370         kfree(gebase);
371
372 out_free_cpu:
373         kfree(vcpu);
374
375 out:
376         return ERR_PTR(err);
377 }
378
379 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380 {
381         hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383         kvm_vcpu_uninit(vcpu);
384
385         kvm_mips_dump_stats(vcpu);
386
387         if (vcpu->arch.guest_ebase)
388                 kfree(vcpu->arch.guest_ebase);
389
390         if (vcpu->arch.kseg0_commpage)
391                 kfree(vcpu->arch.kseg0_commpage);
392
393 }
394
395 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396 {
397         kvm_arch_vcpu_free(vcpu);
398 }
399
400 int
401 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402                                     struct kvm_guest_debug *dbg)
403 {
404         return -EINVAL;
405 }
406
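/*
 * Main vcpu run entry: complete any pending MMIO load, deliver pending guest
 * interrupts, then drop into the low-level run loop (__kvm_mips_vcpu_run)
 * with the caller's signal mask temporarily installed.
 */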
407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408 {
409         int r = 0;
410         sigset_t sigsaved;
411
412         if (vcpu->sigset_active)
413                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414
415         if (vcpu->mmio_needed) {
416                 if (!vcpu->mmio_is_write)
417                         kvm_mips_complete_mmio_load(vcpu, run);
418                 vcpu->mmio_needed = 0;
419         }
420
421         /* Check if we have any exceptions/interrupts pending */
422         kvm_mips_deliver_interrupts(vcpu,
423                                     kvm_read_c0_guest_cause(vcpu->arch.cop0));
424
425         local_irq_disable();
426         kvm_guest_enter();
427
428         r = __kvm_mips_vcpu_run(run, vcpu);
429
430         kvm_guest_exit();
431         local_irq_enable();
432
433         if (vcpu->sigset_active)
434                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435
436         return r;
437 }
438
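/*
 * Interrupt injection as handled below: irq->irq of 2, 3 or 4 queues the
 * corresponding IO interrupt, a negative value (-2, -3, -4) dequeues it,
 * and irq->cpu == -1 targets the calling vcpu itself.
 */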
439 int
440 kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441 {
442         int intr = (int)irq->irq;
443         struct kvm_vcpu *dvcpu = NULL;
444
445         if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446                 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447                           (int)intr);
448
449         if (irq->cpu == -1)
450                 dvcpu = vcpu;
451         else
452                 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454         if (intr == 2 || intr == 3 || intr == 4) {
455                 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457         } else if (intr == -2 || intr == -3 || intr == -4) {
458                 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459         } else {
460                 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461                         irq->cpu, irq->irq);
462                 return -EINVAL;
463         }
464
465         dvcpu->arch.wait = 0;
466
467         if (waitqueue_active(&dvcpu->wq)) {
468                 wake_up_interruptible(&dvcpu->wq);
469         }
470
471         return 0;
472 }
473
474 int
475 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476                                 struct kvm_mp_state *mp_state)
477 {
478         return -EINVAL;
479 }
480
481 int
482 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483                                 struct kvm_mp_state *mp_state)
484 {
485         return -EINVAL;
486 }
487
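/*
 * ONE_REG ids for guest CP0 state.  The encoding below packs the CP0
 * (register, select) pair into the id: 0x10000 marks the CP0 space and the
 * low bits are (8 * register + select), e.g. Config1 is reg 16 sel 1.
 */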
488 #define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
489 #define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
490 #define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
491 #define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
492 #define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
493 #define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
494 #define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
495 #define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
496 #define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
497 #define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
498 #define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
499 #define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
500 #define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
501 #define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
502 #define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
503 #define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
504 #define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
505 #define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
506 #define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
507 #define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
508 #define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
509 #define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
510 #define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
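
/*
 * Illustrative userspace usage (not part of this file), assuming a vcpu fd
 * obtained via KVM_CREATE_VCPU; values are transferred as 64-bit quantities
 * through the user pointer in kvm_one_reg.addr:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_STATUS,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */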
511
512 static u64 kvm_mips_get_one_regs[] = {
513         KVM_REG_MIPS_R0,
514         KVM_REG_MIPS_R1,
515         KVM_REG_MIPS_R2,
516         KVM_REG_MIPS_R3,
517         KVM_REG_MIPS_R4,
518         KVM_REG_MIPS_R5,
519         KVM_REG_MIPS_R6,
520         KVM_REG_MIPS_R7,
521         KVM_REG_MIPS_R8,
522         KVM_REG_MIPS_R9,
523         KVM_REG_MIPS_R10,
524         KVM_REG_MIPS_R11,
525         KVM_REG_MIPS_R12,
526         KVM_REG_MIPS_R13,
527         KVM_REG_MIPS_R14,
528         KVM_REG_MIPS_R15,
529         KVM_REG_MIPS_R16,
530         KVM_REG_MIPS_R17,
531         KVM_REG_MIPS_R18,
532         KVM_REG_MIPS_R19,
533         KVM_REG_MIPS_R20,
534         KVM_REG_MIPS_R21,
535         KVM_REG_MIPS_R22,
536         KVM_REG_MIPS_R23,
537         KVM_REG_MIPS_R24,
538         KVM_REG_MIPS_R25,
539         KVM_REG_MIPS_R26,
540         KVM_REG_MIPS_R27,
541         KVM_REG_MIPS_R28,
542         KVM_REG_MIPS_R29,
543         KVM_REG_MIPS_R30,
544         KVM_REG_MIPS_R31,
545
546         KVM_REG_MIPS_HI,
547         KVM_REG_MIPS_LO,
548         KVM_REG_MIPS_PC,
549
550         KVM_REG_MIPS_CP0_INDEX,
551         KVM_REG_MIPS_CP0_CONTEXT,
552         KVM_REG_MIPS_CP0_PAGEMASK,
553         KVM_REG_MIPS_CP0_WIRED,
554         KVM_REG_MIPS_CP0_BADVADDR,
555         KVM_REG_MIPS_CP0_ENTRYHI,
556         KVM_REG_MIPS_CP0_STATUS,
557         KVM_REG_MIPS_CP0_CAUSE,
558         /* EPC set via kvm_regs, et al. */
559         KVM_REG_MIPS_CP0_CONFIG,
560         KVM_REG_MIPS_CP0_CONFIG1,
561         KVM_REG_MIPS_CP0_CONFIG2,
562         KVM_REG_MIPS_CP0_CONFIG3,
563         KVM_REG_MIPS_CP0_CONFIG7,
564         KVM_REG_MIPS_CP0_ERROREPC
565 };
566
567 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
568                             const struct kvm_one_reg *reg)
569 {
570         u64 __user *uaddr = (u64 __user *)(long)reg->addr;
571
572         struct mips_coproc *cop0 = vcpu->arch.cop0;
573         s64 v;
574
575         switch (reg->id) {
576         case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
577                 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
578                 break;
579         case KVM_REG_MIPS_HI:
580                 v = (long)vcpu->arch.hi;
581                 break;
582         case KVM_REG_MIPS_LO:
583                 v = (long)vcpu->arch.lo;
584                 break;
585         case KVM_REG_MIPS_PC:
586                 v = (long)vcpu->arch.pc;
587                 break;
588
589         case KVM_REG_MIPS_CP0_INDEX:
590                 v = (long)kvm_read_c0_guest_index(cop0);
591                 break;
592         case KVM_REG_MIPS_CP0_CONTEXT:
593                 v = (long)kvm_read_c0_guest_context(cop0);
594                 break;
595         case KVM_REG_MIPS_CP0_PAGEMASK:
596                 v = (long)kvm_read_c0_guest_pagemask(cop0);
597                 break;
598         case KVM_REG_MIPS_CP0_WIRED:
599                 v = (long)kvm_read_c0_guest_wired(cop0);
600                 break;
601         case KVM_REG_MIPS_CP0_BADVADDR:
602                 v = (long)kvm_read_c0_guest_badvaddr(cop0);
603                 break;
604         case KVM_REG_MIPS_CP0_ENTRYHI:
605                 v = (long)kvm_read_c0_guest_entryhi(cop0);
606                 break;
607         case KVM_REG_MIPS_CP0_STATUS:
608                 v = (long)kvm_read_c0_guest_status(cop0);
609                 break;
610         case KVM_REG_MIPS_CP0_CAUSE:
611                 v = (long)kvm_read_c0_guest_cause(cop0);
612                 break;
613         case KVM_REG_MIPS_CP0_ERROREPC:
614                 v = (long)kvm_read_c0_guest_errorepc(cop0);
615                 break;
616         case KVM_REG_MIPS_CP0_CONFIG:
617                 v = (long)kvm_read_c0_guest_config(cop0);
618                 break;
619         case KVM_REG_MIPS_CP0_CONFIG1:
620                 v = (long)kvm_read_c0_guest_config1(cop0);
621                 break;
622         case KVM_REG_MIPS_CP0_CONFIG2:
623                 v = (long)kvm_read_c0_guest_config2(cop0);
624                 break;
625         case KVM_REG_MIPS_CP0_CONFIG3:
626                 v = (long)kvm_read_c0_guest_config3(cop0);
627                 break;
628         case KVM_REG_MIPS_CP0_CONFIG7:
629                 v = (long)kvm_read_c0_guest_config7(cop0);
630                 break;
631         default:
632                 return -EINVAL;
633         }
634         return put_user(v, uaddr);
635 }
636
637 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
638                             const struct kvm_one_reg *reg)
639 {
640         u64 __user *uaddr = (u64 __user *)(long)reg->addr;
641         struct mips_coproc *cop0 = vcpu->arch.cop0;
642         u64 v;
643
644         if (get_user(v, uaddr) != 0)
645                 return -EFAULT;
646
647         switch (reg->id) {
648         case KVM_REG_MIPS_R0:
649                 /* Silently ignore requests to set $0 */
650                 break;
651         case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
652                 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
653                 break;
654         case KVM_REG_MIPS_HI:
655                 vcpu->arch.hi = v;
656                 break;
657         case KVM_REG_MIPS_LO:
658                 vcpu->arch.lo = v;
659                 break;
660         case KVM_REG_MIPS_PC:
661                 vcpu->arch.pc = v;
662                 break;
663
664         case KVM_REG_MIPS_CP0_INDEX:
665                 kvm_write_c0_guest_index(cop0, v);
666                 break;
667         case KVM_REG_MIPS_CP0_CONTEXT:
668                 kvm_write_c0_guest_context(cop0, v);
669                 break;
670         case KVM_REG_MIPS_CP0_PAGEMASK:
671                 kvm_write_c0_guest_pagemask(cop0, v);
672                 break;
673         case KVM_REG_MIPS_CP0_WIRED:
674                 kvm_write_c0_guest_wired(cop0, v);
675                 break;
676         case KVM_REG_MIPS_CP0_BADVADDR:
677                 kvm_write_c0_guest_badvaddr(cop0, v);
678                 break;
679         case KVM_REG_MIPS_CP0_ENTRYHI:
680                 kvm_write_c0_guest_entryhi(cop0, v);
681                 break;
682         case KVM_REG_MIPS_CP0_STATUS:
683                 kvm_write_c0_guest_status(cop0, v);
684                 break;
685         case KVM_REG_MIPS_CP0_CAUSE:
686                 kvm_write_c0_guest_cause(cop0, v);
687                 break;
688         case KVM_REG_MIPS_CP0_ERROREPC:
689                 kvm_write_c0_guest_errorepc(cop0, v);
690                 break;
691         default:
692                 return -EINVAL;
693         }
694         return 0;
695 }
696
697 long
698 kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
699 {
700         struct kvm_vcpu *vcpu = filp->private_data;
701         void __user *argp = (void __user *)arg;
702         long r;
703
704         switch (ioctl) {
705         case KVM_SET_ONE_REG:
706         case KVM_GET_ONE_REG: {
707                 struct kvm_one_reg reg;
708                 if (copy_from_user(&reg, argp, sizeof(reg)))
709                         return -EFAULT;
710                 if (ioctl == KVM_SET_ONE_REG)
711                         return kvm_mips_set_reg(vcpu, &reg);
712                 else
713                         return kvm_mips_get_reg(vcpu, &reg);
714         }
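        /*
         * KVM_GET_REG_LIST: userspace supplies the capacity of its buffer in
         * reg_list.n; the kernel writes back the real count and copies out
         * the register ids only if the buffer was big enough (else -E2BIG).
         */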
715         case KVM_GET_REG_LIST: {
716                 struct kvm_reg_list __user *user_list = argp;
717                 u64 __user *reg_dest;
718                 struct kvm_reg_list reg_list;
719                 unsigned n;
720
721                 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
722                         return -EFAULT;
723                 n = reg_list.n;
724                 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
725                 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
726                         return -EFAULT;
727                 if (n < reg_list.n)
728                         return -E2BIG;
729                 reg_dest = user_list->reg;
730                 if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
731                                  sizeof(kvm_mips_get_one_regs)))
732                         return -EFAULT;
733                 return 0;
734         }
735         case KVM_NMI:
736                 /* Treat the NMI as a CPU reset */
737                 r = kvm_mips_reset_vcpu(vcpu);
738                 break;
739         case KVM_INTERRUPT:
740                 {
741                         struct kvm_mips_interrupt irq;
742                         r = -EFAULT;
743                         if (copy_from_user(&irq, argp, sizeof(irq)))
744                                 goto out;
745
746                         kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
747                                   irq.irq);
748
749                         r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
750                         break;
751                 }
752         default:
753                 r = -ENOIOCTLCMD;
754         }
755
756 out:
757         return r;
758 }
759
760 /*
761  * Get (and clear) the dirty memory log for a memory slot.
762  */
763 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
764 {
765         struct kvm_memory_slot *memslot;
766         unsigned long ga, ga_end;
767         int is_dirty = 0;
768         int r;
769         unsigned long n;
770
771         mutex_lock(&kvm->slots_lock);
772
773         r = kvm_get_dirty_log(kvm, log, &is_dirty);
774         if (r)
775                 goto out;
776
777         /* If nothing is dirty, don't bother messing with page tables. */
778         if (is_dirty) {
779                 memslot = &kvm->memslots->memslots[log->slot];
780
781                 ga = memslot->base_gfn << PAGE_SHIFT;
782                 ga_end = ga + (memslot->npages << PAGE_SHIFT);
783
784                 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
785                        ga_end);
786
787                 n = kvm_dirty_bitmap_bytes(memslot);
788                 memset(memslot->dirty_bitmap, 0, n);
789         }
790
791         r = 0;
792 out:
793         mutex_unlock(&kvm->slots_lock);
794         return r;
795
796 }
797
798 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
799 {
800         long r;
801
802         switch (ioctl) {
803         default:
804                 r = -EINVAL;
805         }
806
807         return r;
808 }
809
810 int kvm_arch_init(void *opaque)
811 {
812         int ret;
813
814         if (kvm_mips_callbacks) {
815                 kvm_err("kvm: module already exists\n");
816                 return -EEXIST;
817         }
818
819         ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
820
821         return ret;
822 }
823
824 void kvm_arch_exit(void)
825 {
826         kvm_mips_callbacks = NULL;
827 }
828
829 int
830 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
831 {
832         return -ENOTSUPP;
833 }
834
835 int
836 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
837 {
838         return -ENOTSUPP;
839 }
840
841 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
842 {
843         return 0;
844 }
845
846 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
847 {
848         return -ENOTSUPP;
849 }
850
851 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
852 {
853         return -ENOTSUPP;
854 }
855
856 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
857 {
858         return VM_FAULT_SIGBUS;
859 }
860
861 int kvm_dev_ioctl_check_extension(long ext)
862 {
863         int r;
864
865         switch (ext) {
866         case KVM_CAP_ONE_REG:
867                 r = 1;
868                 break;
869         case KVM_CAP_COALESCED_MMIO:
870                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
871                 break;
872         default:
873                 r = 0;
874                 break;
875         }
876         return r;
877 }
878
879 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
880 {
881         return kvm_mips_pending_timer(vcpu);
882 }
883
884 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
885 {
886         int i;
887         struct mips_coproc *cop0;
888
889         if (!vcpu)
890                 return -1;
891
892         printk("VCPU Register Dump:\n");
893         printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
894         printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
895
896         for (i = 0; i < 32; i += 4) {
897                 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
898                        vcpu->arch.gprs[i],
899                        vcpu->arch.gprs[i + 1],
900                        vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
901         }
902         printk("\thi: 0x%08lx\n", vcpu->arch.hi);
903         printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
904
905         cop0 = vcpu->arch.cop0;
906         printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
907                kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
908
909         printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
910
911         return 0;
912 }
913
914 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
915 {
916         int i;
917
918         for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
919                 vcpu->arch.gprs[i] = regs->gpr[i];
920         vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
921         vcpu->arch.hi = regs->hi;
922         vcpu->arch.lo = regs->lo;
923         vcpu->arch.pc = regs->pc;
924
925         return 0;
926 }
927
928 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
929 {
930         int i;
931
932         for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
933                 regs->gpr[i] = vcpu->arch.gprs[i];
934
935         regs->hi = vcpu->arch.hi;
936         regs->lo = vcpu->arch.lo;
937         regs->pc = vcpu->arch.pc;
938
939         return 0;
940 }
941
942 void kvm_mips_comparecount_func(unsigned long data)
943 {
944         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
945
946         kvm_mips_callbacks->queue_timer_int(vcpu);
947
948         vcpu->arch.wait = 0;
949         if (waitqueue_active(&vcpu->wq)) {
950                 wake_up_interruptible(&vcpu->wq);
951         }
952 }
953
954 /*
955  * Low-level hrtimer wake routine: queue a guest timer interrupt and re-arm 10ms out.
956  */
957 enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
958 {
959         struct kvm_vcpu *vcpu;
960
961         vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
962         kvm_mips_comparecount_func((unsigned long) vcpu);
963         hrtimer_forward_now(&vcpu->arch.comparecount_timer,
964                             ktime_set(0, MS_TO_NS(10)));
965         return HRTIMER_RESTART;
966 }
967
968 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
969 {
970         kvm_mips_callbacks->vcpu_init(vcpu);
971         hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
972                      HRTIMER_MODE_REL);
973         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
974         kvm_mips_init_shadow_tlb(vcpu);
975         return 0;
976 }
977
978 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
979 {
980         return;
981 }
982
983 int
984 kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
985 {
986         return 0;
987 }
988
989 /* Initial guest state */
990 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
991 {
992         return kvm_mips_callbacks->vcpu_setup(vcpu);
993 }
994
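/*
 * Enable CU1 (FPU) and MX (DSP) in the host Status register when the hardware
 * supports them; called from kvm_mips_handle_exit() before interrupts are
 * re-enabled and we may hit the scheduler.
 */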
995 static
996 void kvm_mips_set_c0_status(void)
997 {
998         uint32_t status = read_c0_status();
999
1000         if (cpu_has_fpu)
1001                 status |= (ST0_CU1);
1002
1003         if (cpu_has_dsp)
1004                 status |= (ST0_MX);
1005
1006         write_c0_status(status);
1007         ehb();
1008 }
1009
1010 /*
1011  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
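 * e.g. the signal-pending path below encodes this as (-EINTR << 2) | RESUME_HOST.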
1012  */
1013 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1014 {
1015         uint32_t cause = vcpu->arch.host_cp0_cause;
1016         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1017         uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
1018         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1019         enum emulation_result er = EMULATE_DONE;
1020         int ret = RESUME_GUEST;
1021
1022         /* Set a default exit reason */
1023         run->exit_reason = KVM_EXIT_UNKNOWN;
1024         run->ready_for_interrupt_injection = 1;
1025
1026         /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
1027         kvm_mips_set_c0_status();
1028
1029         local_irq_enable();
1030
1031         kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1032                         cause, opc, run, vcpu);
1033
1034         /* Do a privilege check; if in user mode, most of these exit conditions end
1035          * up causing an exception to be delivered to the guest kernel.
1036          */
1037         er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1038         if (er == EMULATE_PRIV_FAIL) {
1039                 goto skip_emul;
1040         } else if (er == EMULATE_FAIL) {
1041                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1042                 ret = RESUME_HOST;
1043                 goto skip_emul;
1044         }
1045
1046         switch (exccode) {
1047         case T_INT:
1048                 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
1049
1050                 ++vcpu->stat.int_exits;
1051                 trace_kvm_exit(vcpu, INT_EXITS);
1052
1053                 if (need_resched()) {
1054                         cond_resched();
1055                 }
1056
1057                 ret = RESUME_GUEST;
1058                 break;
1059
1060         case T_COP_UNUSABLE:
1061                 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
1062
1063                 ++vcpu->stat.cop_unusable_exits;
1064                 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
1065                 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1066                 /* XXXKYMA: Might need to return to user space */
1067                 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
1068                         ret = RESUME_HOST;
1069                 }
1070                 break;
1071
1072         case T_TLB_MOD:
1073                 ++vcpu->stat.tlbmod_exits;
1074                 trace_kvm_exit(vcpu, TLBMOD_EXITS);
1075                 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1076                 break;
1077
1078         case T_TLB_ST_MISS:
1079                 kvm_debug
1080                     ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1081                      cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1082                      badvaddr);
1083
1084                 ++vcpu->stat.tlbmiss_st_exits;
1085                 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
1086                 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1087                 break;
1088
1089         case T_TLB_LD_MISS:
1090                 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1091                           cause, opc, badvaddr);
1092
1093                 ++vcpu->stat.tlbmiss_ld_exits;
1094                 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
1095                 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1096                 break;
1097
1098         case T_ADDR_ERR_ST:
1099                 ++vcpu->stat.addrerr_st_exits;
1100                 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
1101                 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1102                 break;
1103
1104         case T_ADDR_ERR_LD:
1105                 ++vcpu->stat.addrerr_ld_exits;
1106                 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
1107                 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1108                 break;
1109
1110         case T_SYSCALL:
1111                 ++vcpu->stat.syscall_exits;
1112                 trace_kvm_exit(vcpu, SYSCALL_EXITS);
1113                 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1114                 break;
1115
1116         case T_RES_INST:
1117                 ++vcpu->stat.resvd_inst_exits;
1118                 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
1119                 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1120                 break;
1121
1122         case T_BREAK:
1123                 ++vcpu->stat.break_inst_exits;
1124                 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
1125                 ret = kvm_mips_callbacks->handle_break(vcpu);
1126                 break;
1127
1128         default:
1129                 kvm_err
1130                     ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
1131                      exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1132                      kvm_read_c0_guest_status(vcpu->arch.cop0));
1133                 kvm_arch_vcpu_dump_regs(vcpu);
1134                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1135                 ret = RESUME_HOST;
1136                 break;
1137
1138         }
1139
1140 skip_emul:
1141         local_irq_disable();
1142
1143         if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1144                 kvm_mips_deliver_interrupts(vcpu, cause);
1145
1146         if (!(ret & RESUME_HOST)) {
1147                 /* Only check for signals if not already exiting to userspace  */
1148                 if (signal_pending(current)) {
1149                         run->exit_reason = KVM_EXIT_INTR;
1150                         ret = (-EINTR << 2) | RESUME_HOST;
1151                         ++vcpu->stat.signal_exits;
1152                         trace_kvm_exit(vcpu, SIGNAL_EXITS);
1153                 }
1154         }
1155
1156         return ret;
1157 }
1158
1159 int __init kvm_mips_init(void)
1160 {
1161         int ret;
1162
1163         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1164
1165         if (ret)
1166                 return ret;
1167
1168         /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
1169          * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
1170          * to avoid the possibility of double faulting. The issue is that the TLB code
1171          * references routines that are part of the KVM module,
1172          * which are only available once the module is loaded.
1173          */
1174         kvm_mips_gfn_to_pfn = gfn_to_pfn;
1175         kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
1176         kvm_mips_is_error_pfn = is_error_pfn;
1177
1178         pr_info("KVM/MIPS Initialized\n");
1179         return 0;
1180 }
1181
1182 void __exit kvm_mips_exit(void)
1183 {
1184         kvm_exit();
1185
1186         kvm_mips_gfn_to_pfn = NULL;
1187         kvm_mips_release_pfn_clean = NULL;
1188         kvm_mips_is_error_pfn = NULL;
1189
1190         pr_info("KVM/MIPS unloaded\n");
1191 }
1192
1193 module_init(kvm_mips_init);
1194 module_exit(kvm_mips_exit);
1195
1196 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);