KVM: PPC: Ensure split mode works
author: Alexander Graf <agraf@suse.de>
Wed, 24 Mar 2010 20:48:17 +0000 (21:48 +0100)
committer: Avi Kivity <avi@redhat.com>
Mon, 17 May 2010 09:16:49 +0000 (12:16 +0300)
On PowerPC we can go into MMU Split Mode. That means that either
data relocation is on but instruction relocation is off or vice
versa.

That mode didn't work properly, as we weren't always flushing
entries when going into a new split mode, potentially mapping
different code or data than we're supposed to.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/kvm/book3s.c

index e6ea974df44e46ee722848b165284cc61f9f6c7d..14d0262ae00bb58669bdf9c3443572698201051f 100644 (file)
@@ -99,10 +99,11 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST          1
 #define CONTEXT_GUEST_END      2
 
-#define VSID_REAL      0xfffffffffff00000
-#define VSID_REAL_DR   0xffffffffffe00000
-#define VSID_REAL_IR   0xffffffffffd00000
-#define VSID_BAT       0xffffffffffc00000
+#define VSID_REAL_DR   0x7ffffffffff00000
+#define VSID_REAL_IR   0x7fffffffffe00000
+#define VSID_SPLIT_MASK        0x7fffffffffe00000
+#define VSID_REAL      0x7fffffffffc00000
+#define VSID_BAT       0x7fffffffffb00000
 #define VSID_PR                0x8000000000000000
 
 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
index 8cab902771a2aaa82b26a13ba12e10907244c99c..ff5a420582574f5776eb285d17d115a8e75065b4 100644 (file)
@@ -134,6 +134,14 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 
        if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
            (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+               bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+               bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+
+               /* Flush split mode PTEs */
+               if (dr != ir)
+                       kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
+                                             VSID_SPLIT_MASK);
+
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
        }
@@ -396,15 +404,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & 0xffffffff;
-               pte->vpage = eaddr >> 12;
-               switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-               case 0:
-                       pte->vpage |= VSID_REAL;
-               case MSR_DR:
-                       pte->vpage |= VSID_REAL_DR;
-               case MSR_IR:
-                       pte->vpage |= VSID_REAL_IR;
-               }
+               pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
@@ -513,12 +513,10 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
+       bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+       bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
 
-       if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
-               relocated = (vcpu->arch.msr & MSR_DR);
-       } else {
-               relocated = (vcpu->arch.msr & MSR_IR);
-       }
+       relocated = data ? dr : ir;
 
        /* Resolve real address if translation turned on */
        if (relocated) {
@@ -530,14 +528,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte.raddr = eaddr & 0xffffffff;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
-               switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-               case 0:
-                       pte.vpage |= VSID_REAL;
-               case MSR_DR:
-                       pte.vpage |= VSID_REAL_DR;
-               case MSR_IR:
-                       pte.vpage |= VSID_REAL_IR;
-               }
+       }
+
+       switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       case 0:
+               pte.vpage |= VSID_REAL;
+               break;
+       case MSR_DR:
+               pte.vpage |= VSID_REAL_DR;
+               break;
+       case MSR_IR:
+               pte.vpage |= VSID_REAL_IR;
+               break;
        }
 
        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&