KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
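
The hunks below show the callers switching from pointer access (kvm_x86_ops->get_cpl) to direct member access (kvm_x86_ops.get_cpl), i.e. the vendor callback table is now held by value rather than behind a global pointer, saving one dereference per callback invocation. A minimal sketch of that by-value pattern, with invented names (sample_ops, ops_init) standing in for the real kvm_x86_ops plumbing, which presumably copies the vendor table once during hardware setup:

/*
 * Simplified illustration only: "sample_ops" and "ops_init" are
 * hypothetical stand-ins, not the actual KVM definitions.
 */
struct sample_ops {
	int (*get_cpl)(void *vcpu);
	unsigned long (*get_rflags)(void *vcpu);
};

/* Before: a global pointer, so every call was ops_ptr->get_cpl(vcpu). */
/* After:  a global instance; callers use ops.get_cpl(vcpu) directly.  */
static struct sample_ops ops;

static void ops_init(const struct sample_ops *vendor_ops)
{
	ops = *vendor_ops;	/* copy the vendor table by value, once */
}
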
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d55674f44a18b52ac81b9daa3088a7ba442fe2cc..8a3b1bce722a4774de571c326ffd0012624d2835 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -95,11 +95,24 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-               vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
-                                             kvm_get_active_pcid(vcpu));
+               kvm_x86_ops.load_mmu_pgd(vcpu, vcpu->arch.mmu->root_hpa |
+                                              kvm_get_active_pcid(vcpu));
+}
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+                      bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                       u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+       if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+               return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+       return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
 }
 
 /*
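
The new kvm_mmu_do_page_fault() above uses the CONFIG_RETPOLINE idiom of comparing the function pointer against its expected hot-path target and calling that target directly on a match, so the common TDP fault path avoids a retpolined indirect branch. A stand-alone sketch of the same idiom, with hypothetical names (fast_handler, dispatch) and a local likely() fallback added only so the snippet compiles on its own:

#ifndef likely
#define likely(x)	__builtin_expect(!!(x), 1)
#endif

static int fast_handler(int arg)
{
	return arg + 1;		/* stands in for the common (TDP) path */
}

static int (*handler)(int arg) = fast_handler;

static int dispatch(int arg)
{
#ifdef CONFIG_RETPOLINE
	/* Direct call when the pointer matches the usual target. */
	if (likely(handler == fast_handler))
		return fast_handler(arg);
#endif
	return handler(arg);	/* otherwise, the normal indirect call */
}
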
@@ -157,8 +170,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
 {
-       int cpl = kvm_x86_ops->get_cpl(vcpu);
-       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+       int cpl = kvm_x86_ops.get_cpl(vcpu);
+       unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 
        /*
         * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1.