X86/nVMX: Update the PML table without mapping and unmapping the page
author: KarimAllah Ahmed <karahmed@amazon.de>
Thu, 31 Jan 2019 20:24:32 +0000 (21:24 +0100)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 30 Apr 2019 19:32:38 +0000 (21:32 +0200)
Update the PML table without mapping and unmapping the page. This also
avoids using kvm_vcpu_gpa_to_page(..) which assumes that there is a "struct
page" for guest memory.

As a side-effect of using kvm_write_guest_page(), the page is also properly
marked as dirty.

Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c

index 835f642e70e21fea4f8d51b3d455ac292b58b8fe..afe45a28020cad680f027ee7ff7dbc848d86ed9c 100644 (file)
@@ -7116,9 +7116,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa;
-       struct page *page = NULL;
-       u64 *pml_address;
+       gpa_t gpa, dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7138,15 +7136,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                }
 
                gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
-               page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
-               if (is_error_page(page))
+               if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+                                        offset_in_page(dst), sizeof(gpa)))
                        return 0;
 
-               pml_address = kmap(page);
-               pml_address[vmcs12->guest_pml_index--] = gpa;
-               kunmap(page);
-               kvm_release_page_clean(page);
+               vmcs12->guest_pml_index--;
        }
 
        return 0;