kvm: rename pfn_t to kvm_pfn_t
arch/arm/kvm/mmu.c
index 22f7fa0124ec1d80c550fea0ecf55a2c7d603091..aba61fd3697aa6260f6b0b3626434e2859bc3248 100644
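
The rename is purely mechanical: KVM's private pfn_t typedef clashes with the generic pfn_t being introduced in the core mm, so every KVM use of the type moves into the kvm_ namespace while the representation itself does not change. A minimal sketch of the typedef side of the rename, assuming it lives in include/linux/kvm_types.h as in mainline:

	/* KVM's page-frame-number type; only the name changes here,
	 * the width stays u64 so nothing but the spelling changes
	 * for callers such as the mmu.c hunks below.
	 */
	typedef u64 kvm_pfn_t;		/* was: typedef u64 pfn_t; */
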
@@ -992,9 +992,9 @@ out:
        return ret;
 }
 
-static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
-       pfn_t pfn = *pfnp;
+       kvm_pfn_t pfn = *pfnp;
        gfn_t gfn = *ipap >> PAGE_SHIFT;
 
        if (PageTransCompound(pfn_to_page(pfn))) {
@@ -1201,7 +1201,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
                                      unsigned long size, bool uncached)
 {
        __coherent_cache_guest_page(vcpu, pfn, size, uncached);
@@ -1218,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        struct vm_area_struct *vma;
-       pfn_t pfn;
+       kvm_pfn_t pfn;
        pgprot_t mem_type = PAGE_S2;
        bool fault_ipa_uncached;
        bool logging_active = memslot_is_logging(memslot);
@@ -1346,7 +1346,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
        pmd_t *pmd;
        pte_t *pte;
-       pfn_t pfn;
+       kvm_pfn_t pfn;
        bool pfn_valid = false;
 
        trace_kvm_access_fault(fault_ipa);
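
Helpers declared outside this file are renamed the same way, so the callers above (user_mem_abort() and handle_access_fault()) simply pick up the new spelling from the shared prototypes. For illustration, two of the gfn-to-pfn translation helpers as they read after the rename, assuming the mainline declarations in include/linux/kvm_host.h:

	/* gfn -> pfn translation used on the fault paths; the return
	 * type switches from pfn_t to kvm_pfn_t, arguments untouched.
	 */
	kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
	kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn,
				  bool write_fault, bool *writable);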