KVM: X86: Rename and move the function vmx_handle_memory_failure to x86.c
Author:     Babu Moger <babu.moger@amd.com>
AuthorDate: Fri, 11 Sep 2020 19:29:05 +0000 (14:29 -0500)
Committer:  Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 28 Sep 2020 11:57:16 +0000 (07:57 -0400)
Handling of kvm_read/write_guest_virt*() errors can be moved to common
code. The same code can be used by both VMX and SVM.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <159985254493.11252.6603092560732507607.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index 07dc5663a48e0d672f681110547ec1ce30dda6f1..b6ce9ce91029e5c02bb4d708bca2d06eb888f1fd 100644 (file)
@@ -4696,7 +4696,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
 
        r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
        if (r != X86EMUL_CONTINUE) {
-               *ret = vmx_handle_memory_failure(vcpu, r, &e);
+               *ret = kvm_handle_memory_failure(vcpu, r, &e);
                return -EINVAL;
        }
 
@@ -5003,7 +5003,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
                r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
                if (r != X86EMUL_CONTINUE)
-                       return vmx_handle_memory_failure(vcpu, r, &e);
+                       return kvm_handle_memory_failure(vcpu, r, &e);
        }
 
        return nested_vmx_succeed(vcpu);
@@ -5076,7 +5076,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                        return 1;
                r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
                if (r != X86EMUL_CONTINUE)
-                       return vmx_handle_memory_failure(vcpu, r, &e);
+                       return kvm_handle_memory_failure(vcpu, r, &e);
        }
 
        field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5238,7 +5238,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
                                        sizeof(gpa_t), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        return nested_vmx_succeed(vcpu);
 }
@@ -5291,7 +5291,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
                return 1;
        r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        /*
         * Nested EPT roots are always held through guest_mmu,
@@ -5373,7 +5373,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                return 1;
        r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        if (operand.vpid >> 16)
                return nested_vmx_fail(vcpu,
index f4cfc18366deff0b7955ab0db4d62ff501b71068..76d657d5d5f7fcf02251015d0788aa76b49a70dd 100644 (file)
@@ -1598,33 +1598,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-/*
- * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
- * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
- * indicates whether exit to userspace is needed.
- */
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-                             struct x86_exception *e)
-{
-       if (r == X86EMUL_PROPAGATE_FAULT) {
-               kvm_inject_emulated_page_fault(vcpu, e);
-               return 1;
-       }
-
-       /*
-        * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
-        * while handling a VMX instruction KVM could've handled the request
-        * correctly by exiting to userspace and performing I/O but there
-        * doesn't seem to be a real use-case behind such requests, just return
-        * KVM_EXIT_INTERNAL_ERROR for now.
-        */
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-       vcpu->run->internal.ndata = 0;
-
-       return 0;
-}
-
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
  * delivery.
@@ -5558,7 +5531,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 
        r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
        if (r != X86EMUL_CONTINUE)
-               return vmx_handle_memory_failure(vcpu, r, &e);
+               return kvm_handle_memory_failure(vcpu, r, &e);
 
        if (operand.pcid >> 12 != 0) {
                kvm_inject_gp(vcpu, 0);
index a2f82127c1707ae71c0c2836bef6ed3898361f27..d7ec66db5eb853270d5f6faee23000b509a72f46 100644 (file)
@@ -354,8 +354,6 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-                             struct x86_exception *e);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
 #define POSTED_INTR_ON  0
index 836baad47fe9795674ec451e23b8003121298249..e560687ecf3443f9f56b2d65e9958f2dbcf3ca4b 100644 (file)
@@ -10765,6 +10765,34 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
 }
 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
 
+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+                             struct x86_exception *e)
+{
+       if (r == X86EMUL_PROPAGATE_FAULT) {
+               kvm_inject_emulated_page_fault(vcpu, e);
+               return 1;
+       }
+
+       /*
+        * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+        * while handling a VMX instruction KVM could've handled the request
+        * correctly by exiting to userspace and performing I/O but there
+        * doesn't seem to be a real use-case behind such requests, just return
+        * KVM_EXIT_INTERNAL_ERROR for now.
+        */
+       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+       vcpu->run->internal.ndata = 0;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
index ea20b8bfd5dc6e2377f65572bd85f205d7813a2a..c05a953ad29ad8317afb759b576e18cc63696011 100644 (file)
@@ -371,6 +371,8 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+                             struct x86_exception *e);
 
 #define  KVM_MSR_RET_INVALID  2