/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
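
/*
 * On 32-bit Book3S there is only a single user MSR value and only 4k
 * hardware pages, so the 64-bit names above simply collapse onto their
 * 32-bit equivalents and the shared code below compiles unchanged for
 * both configurations.
 */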
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	vcpu->cpu = -1;
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}
/************* MMU Notifiers *************/
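
/*
 * Rather than tracking which shadow entries map a given host page, the
 * notifier handlers below simply flush all shadow state. That trades speed
 * for certainty: unmap notifications are comparatively rare, and a full
 * flush is guaranteed to catch the affected page.
 */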
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page that is to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/
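
/*
 * The shadow MSR is the MSR value the guest actually runs with in problem
 * state. Guest-controlled bits (FE0/FE1/SF/SE/BE/DE) pass through; bits the
 * host must own (IR/DR/PR/EE/ME/RI) are forced on. For example, a guest MSR
 * of SF|IR|DR|FP yields a shadow MSR of SF|IR|DR|PR|EE|ME|RI, gaining FP
 * only once the guest owns the FPU via kvmppc_handle_ext().
 */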
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers (FPU, Altivec, VSX) the guest has reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
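
/*
 * The PVR tells us which guest CPU we are emulating. PVR values that pass
 * the 64-bit range check below get the 64-bit (hashed page table + SLB)
 * MMU backend and the classic 0xfff00000 interrupt prefix; everything else
 * is treated as a 32-bit Book3S core.
 */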
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
		break;
	}
}
/* Book3s_32 CPUs always have a 32 byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate the 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}
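
/*
 * Worked example of the patch above: dcbz encodes as primary opcode 31 with
 * extended opcode 1014, i.e. 0x7c0007ec once the register fields are masked
 * off via 0xff0007ff. Clearing bit 0x8 yields an encoding the CPU does not
 * implement, so every patched dcbz raises a program interrupt that the
 * emulator recognizes (see the INS_DCBZ check in kvmppc_handle_exit) and
 * executes as a 32-byte clear.
 */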
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
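
/*
 * Page fault path: if the guest runs with translation on, walk the guest
 * MMU to resolve the effective address; in real mode, fabricate an identity
 * mapping and tag the virtual page with a reserved VSID (VSID_REAL,
 * VSID_REAL_IR or VSID_REAL_DR) so real-mode and translated accesses can
 * never alias in the shadow MMU. The resulting error code then decides
 * whether we reflect the fault into the guest, map the page on the host,
 * or treat the access as MMIO and exit to userspace.
 */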
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}
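
/*
 * With VSX, the 32 FP registers occupy the even-numbered doublewords of the
 * combined FP/VSR save area (each VSR is two doublewords wide), so FPR i
 * lives at index 2*i and the matching low VSR doubleword at 2*i + 1 - which
 * is why the VSX loops below use get_fpr_index(i) + 1.
 */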
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
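
/*
 * The field numbers above use the ISA's big-endian bit numbering on the
 * 64-bit MSR/SRR1 image (bit 0 is the MSB): setting bit 33 and clearing
 * bits 34-36 and 42-47 builds the SRR1 status for an instruction storage
 * interrupt whose translation was simply not found.
 */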
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
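
/*
 * Together, kvmppc_giveup_ext() and kvmppc_handle_ext() implement lazy
 * FPU/Altivec/VSX switching: guest register state is only loaded into the
 * real unit when the guest first touches it, and handed back when we leave
 * the vcpu, so exits that never use these units pay nothing for them.
 */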
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
int kvmppc_core_check_processor_compat(void)
{
	return 0;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;

	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
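	/*
	 * vcpu->arch.shared is the structure the guest can map via the
	 * paravirtual "magic page"; keeping it in the last 4k of the host
	 * page means the 4k area the guest maps is the one the host
	 * actually updates, regardless of the host's base page size.
	 */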

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
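
/*
 * kvmppc_vcpu_run saves the host's FP, Altivec and VSX state on the stack
 * before entering the guest and restores it afterwards, since the lazy
 * ext loading above may clobber the real register files while the guest
 * runs.
 */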
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_lazy_ee_enable();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);