KVM: SVM: enhance MOV CR intercept handler
authorAndre Przywara <andre.przywara@amd.com>
Tue, 21 Dec 2010 10:12:04 +0000 (11:12 +0100)
committerAvi Kivity <avi@redhat.com>
Wed, 12 Jan 2011 09:31:03 +0000 (11:31 +0200)
Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle CR
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.
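
With decode assists, exit_info_1 for a MOV CR intercept carries a valid bit
(bit 63) and the operand GPR number in its low nibble, so the handler can
resolve the source/destination register without re-fetching and emulating the
guest instruction. A minimal standalone sketch of that decode (illustration
only, not kernel code; it mirrors the CR_VALID and SVM_EXITINFO_REG_MASK
constants introduced by this patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Same encoding the patch relies on: bit 63 marks the info as
	 * valid, bits 3:0 hold the GPR number used by the MOV CR. */
	#define CR_VALID               (1ULL << 63)
	#define SVM_EXITINFO_REG_MASK  0x0F

	/* Decode exit_info_1 into a GPR index; return -1 when the CPU did
	 * not provide decode assist data and the caller must fall back to
	 * instruction emulation. */
	static int decode_mov_cr_gpr(uint64_t exit_info_1)
	{
		if (!(exit_info_1 & CR_VALID))
			return -1;
		return (int)(exit_info_1 & SVM_EXITINFO_REG_MASK);
	}

	int main(void)
	{
		/* Example: valid info, operand register number 3 (RBX in
		 * the usual x86 GPR encoding). */
		uint64_t exit_info_1 = CR_VALID | 0x3;

		printf("GPR index: %d\n", decode_mov_cr_gpr(exit_info_1));
		return 0;
	}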

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/include/asm/svm.h
arch/x86/kvm/svm.c

index f7087bf9caf0cbc24879eb3358753da58ce5a8b8..f0ffb81840899dc5eb55ca7e3f2c4b3945904b48 100644 (file)
@@ -260,6 +260,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define        SVM_EXIT_READ_CR0       0x000
 #define        SVM_EXIT_READ_CR3       0x003
 #define        SVM_EXIT_READ_CR4       0x004
index 2830a73ea736e21f2c8b404d37a3815f1ae0ceb4..5abaa5b2f6248a185c2f03028e4268d9c8e69340 100644 (file)
@@ -2660,12 +2660,80 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+       int reg, cr;
+       unsigned long val;
+       int err;
+
+       if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+               return emulate_on_interception(svm);
+
+       if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+               return emulate_on_interception(svm);
+
+       reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+       cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+       err = 0;
+       if (cr >= 16) { /* mov to cr */
+               cr -= 16;
+               val = kvm_register_read(&svm->vcpu, reg);
+               switch (cr) {
+               case 0:
+                       err = kvm_set_cr0(&svm->vcpu, val);
+                       break;
+               case 3:
+                       err = kvm_set_cr3(&svm->vcpu, val);
+                       break;
+               case 4:
+                       err = kvm_set_cr4(&svm->vcpu, val);
+                       break;
+               case 8:
+                       err = kvm_set_cr8(&svm->vcpu, val);
+                       break;
+               default:
+                       WARN(1, "unhandled write to CR%d", cr);
+                       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+                       return 1;
+               }
+       } else { /* mov from cr */
+               switch (cr) {
+               case 0:
+                       val = kvm_read_cr0(&svm->vcpu);
+                       break;
+               case 2:
+                       val = svm->vcpu.arch.cr2;
+                       break;
+               case 3:
+                       val = svm->vcpu.arch.cr3;
+                       break;
+               case 4:
+                       val = kvm_read_cr4(&svm->vcpu);
+                       break;
+               case 8:
+                       val = kvm_get_cr8(&svm->vcpu);
+                       break;
+               default:
+                       WARN(1, "unhandled read from CR%d", cr);
+                       kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+                       return 1;
+               }
+               kvm_register_write(&svm->vcpu, reg, val);
+       }
+       kvm_complete_insn_gp(&svm->vcpu, err);
+
+       return 1;
+}
+
 static int cr0_write_interception(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int r;
 
-       r = emulate_instruction(&svm->vcpu, 0);
+       r = cr_interception(svm);
 
        if (svm->nested.vmexit_rip) {
                kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2674,7 +2742,7 @@ static int cr0_write_interception(struct vcpu_svm *svm)
                svm->nested.vmexit_rip = 0;
        }
 
-       return r == EMULATE_DONE;
+       return r;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
@@ -2684,13 +2752,13 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 
        u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
        /* instruction emulation calls kvm_set_cr8() */
-       r = emulate_instruction(&svm->vcpu, 0);
+       r = cr_interception(svm);
        if (irqchip_in_kernel(svm->vcpu.kvm)) {
                clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
-               return r == EMULATE_DONE;
+               return r;
        }
        if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-               return r == EMULATE_DONE;
+               return r;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
 }
@@ -2933,14 +3001,14 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-       [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
-       [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
+       [SVM_EXIT_READ_CR0]                     = cr_interception,
+       [SVM_EXIT_READ_CR3]                     = cr_interception,
+       [SVM_EXIT_READ_CR4]                     = cr_interception,
+       [SVM_EXIT_READ_CR8]                     = cr_interception,
        [SVM_EXIT_CR0_SEL_WRITE]                = emulate_on_interception,
        [SVM_EXIT_WRITE_CR0]                    = cr0_write_interception,
-       [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
-       [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
+       [SVM_EXIT_WRITE_CR3]                    = cr_interception,
+       [SVM_EXIT_WRITE_CR4]                    = cr_interception,
        [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
        [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR1]                     = emulate_on_interception,