kvm: nVMX: Reorder error checks for emulated VMXON
[linux-2.6-block.git] / arch / x86 / kvm / vmx.c
index 24db5fb6f575af27d3b61a67b15ce9996158ed8b..4e691035a32d392bc050a98ff216e77d495469c3 100644 (file)
@@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 
 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
 
-#define VMX_VPID_EXTENT_SUPPORTED_MASK         \
-       (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
-       VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
-       VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
-       VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
-
 /*
  * Hyper-V requires all of these, so mark them as supported even though
  * they are just treated the same as all-context.
@@ -5242,10 +5236,10 @@ static void ept_set_mmio_spte_mask(void)
        /*
         * EPT Misconfigurations can be generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
-        * Also, magic bits (0x3ull << 62) is set to quickly identify mmio
-        * spte.
+        * Also, special bit (62) is set to quickly identify mmio spte.
         */
-       kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
+       kvm_mmu_set_mmio_spte_mask(SPTE_SPECIAL_MASK |
+                                  VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
@@ -6158,7 +6152,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 {
-       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       kvm_apic_update_ppr(vcpu);
        return 1;
 }
 
@@ -6380,14 +6374,20 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        trace_kvm_page_fault(gpa, exit_qualification);
 
-       /* it is a read fault? */
-       error_code = (exit_qualification << 2) & PFERR_USER_MASK;
-       /* it is a write fault? */
-       error_code |= exit_qualification & PFERR_WRITE_MASK;
-       /* It is a fetch fault? */
-       error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
-       /* ept page table is present? */
-       error_code |= (exit_qualification & 0x38) != 0;
+       /* Is it a read fault? */
+       error_code = (exit_qualification & EPT_VIOLATION_READ)
+                    ? PFERR_USER_MASK : 0;
+       /* Is it a write fault? */
+       error_code |= (exit_qualification & EPT_VIOLATION_WRITE)
+                     ? PFERR_WRITE_MASK : 0;
+       /* Is it a fetch fault? */
+       error_code |= (exit_qualification & EPT_VIOLATION_INSTR)
+                     ? PFERR_FETCH_MASK : 0;
+       /* Is the EPT page-table entry present? */
+       error_code |= (exit_qualification &
+                      (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
+                       EPT_VIOLATION_EXECUTABLE))
+                     ? PFERR_PRESENT_MASK : 0;
 
        vcpu->arch.exit_qualification = exit_qualification;
 
@@ -6578,6 +6578,19 @@ static void wakeup_handler(void)
        spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
 }
 
+void vmx_enable_tdp(void)
+{
+       kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
+               enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
+               enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
+               0ull, VMX_EPT_EXECUTABLE_MASK,
+               cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
+               enable_ept_ad_bits ? 0ull : SPTE_SPECIAL_MASK | VMX_EPT_RWX_MASK);
+
+       ept_set_mmio_spte_mask();
+       kvm_enable_tdp();
+}
+
 static __init int hardware_setup(void)
 {
        int r = -ENOMEM, i, msr;
@@ -6703,16 +6716,9 @@ static __init int hardware_setup(void)
        /* SELF-IPI */
        vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
 
-       if (enable_ept) {
-               kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
-                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
-                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-                       0ull, VMX_EPT_EXECUTABLE_MASK,
-                       cpu_has_vmx_ept_execute_only() ?
-                                     0ull : VMX_EPT_READABLE_MASK);
-               ept_set_mmio_spte_mask();
-               kvm_enable_tdp();
-       } else
+       if (enable_ept)
+               vmx_enable_tdp();
+       else
                kvm_disable_tdp();
 
        update_ple_window_actual_max();
@@ -7174,9 +7180,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
-               return 1;
-
        if (vmx->nested.vmxon) {
                nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
                return kvm_skip_emulated_instruction(vcpu);
@@ -7188,6 +7191,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
+       if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+               return 1;
+
        if (cpu_has_vmx_msr_bitmap()) {
                vmx->nested.msr_bitmap =
                                (unsigned long *)__get_free_page(GFP_KERNEL);
@@ -10473,12 +10479,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
            !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
                nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-               goto out;
+               return 1;
        }
        if (vmcs12->vmcs_link_pointer != -1ull) {
                nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-               goto out;
+               return 1;
        }
 
        /*
@@ -10498,7 +10504,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
                        nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-                       goto out;
+                       return 1;
                }
        }
 
@@ -10516,7 +10522,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
                        nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-                       goto out;
+                       return 1;
                }
        }