KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
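
For context: this commit replaces the 'struct kvm_x86_ops *kvm_x86_ops'
pointer used by common x86 code with a by-value instance that is copied
from the vendor module (VMX or SVM) at setup, so invoking a hook costs
one memory load (the function pointer) rather than two (the ops pointer,
then the function pointer). A minimal standalone sketch of that pattern
follows; every name in it (demo_ops, vendor_ops, vmx_get_cpl, ...) is
illustrative, not KVM's actual API:

	#include <stdio.h>
	#include <string.h>

	struct demo_ops {
		int (*get_cpl)(void);
	};

	/* "Vendor" implementation, analogous to a VMX/SVM ops table. */
	static int vmx_get_cpl(void) { return 0; }
	static const struct demo_ops vendor_ops = { .get_cpl = vmx_get_cpl };

	/* Common code owns a single instance; before this commit it would
	 * have been 'static struct demo_ops *ops' pointing at vendor_ops. */
	static struct demo_ops ops;

	int main(void)
	{
		/* Copy by value once at init, as the commit does at setup. */
		memcpy(&ops, &vendor_ops, sizeof(ops));

		/* '.' instead of '->': one fewer pointer dereference. */
		printf("cpl = %d\n", ops.get_cpl());
		return 0;
	}
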
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index a647601c9e1c1ddc06dfb030b81cf74d61b97db6..8a3b1bce722a4774de571c326ffd0012624d2835 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -95,11 +95,11 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-               vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
-                                             kvm_get_active_pcid(vcpu));
+               kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+                                              kvm_get_active_pcid(vcpu));
 }
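
An aside on the value built above: kvm_get_active_pcid() yields a PCID,
which occupies CR3 bits 0-11, while root_hpa is a page-aligned table
root, so its low 12 bits are zero and the two can simply be OR'ed
together. A tiny standalone check of that invariant, with made-up
values (the constants here are illustrative, not taken from KVM):

	#include <assert.h>

	int main(void)
	{
		unsigned long root_hpa = 0x1234000UL; /* page-aligned root   */
		unsigned long pcid     = 0xcUL;       /* PCID fits bits 0-11 */

		assert((root_hpa & 0xfffUL) == 0);    /* low bits are free   */
		return (int)((root_hpa | pcid) != 0x123400cUL);
	}
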
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -170,8 +170,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
 {
-       int cpl = kvm_x86_ops->get_cpl(vcpu);
-       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+       int cpl = kvm_x86_ops.get_cpl(vcpu);
+       unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 
        /*
         * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
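
The comment is truncated here, but the rule it states can be sketched as
follows. This is a hedged illustration: smap_blocks() is a hypothetical
helper, and the kernel implements the same check branchlessly inside
permission_fault():

	#include <stdbool.h>

	#define X86_EFLAGS_AC (1UL << 18) /* alignment check / SMAP override */

	/* For a supervisor access to a user page with SMAP enabled: at
	 * CPL < 3, EFLAGS.AC = 1 disables the SMAP check; at CPL 3 any
	 * supervisor access is implicit, so AC is ignored and SMAP
	 * always applies. */
	static bool smap_blocks(int cpl, unsigned long rflags)
	{
		if (cpl < 3)
			return !(rflags & X86_EFLAGS_AC);
		return true;
	}

	int main(void)
	{
		/* CPL 0 access with AC set: the SMAP check is bypassed. */
		return smap_blocks(0, X86_EFLAGS_AC) ? 1 : 0;
	}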