KVM: MMU: remove pt_access in mmu_set_spte
author: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Tue, 5 Feb 2013 07:27:27 +0000 (15:27 +0800)
committer: Marcelo Tosatti <mtosatti@redhat.com>
Thu, 7 Feb 2013 00:42:08 +0000 (22:42 -0200)
The pt_access parameter is only used in debug code, so drop it.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index 5356d8d2d496078a7a227dcb9b595c481ec0016a..e956e9bed29492de89fc8f312307ec7a5a28bfc8 100644 (file)
@@ -2388,16 +2388,15 @@ done:
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                        unsigned pt_access, unsigned pte_access,
-                        int write_fault, int *emulate, int level, gfn_t gfn,
-                        pfn_t pfn, bool speculative, bool host_writable)
+                        unsigned pte_access, int write_fault, int *emulate,
+                        int level, gfn_t gfn, pfn_t pfn, bool speculative,
+                        bool host_writable)
 {
        int was_rmapped = 0;
        int rmap_count;
 
-       pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
-                __func__, *sptep, pt_access,
-                write_fault, gfn);
+       pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
+                *sptep, write_fault, gfn);
 
        if (is_rmap_spte(*sptep)) {
                /*
@@ -2513,7 +2512,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
                return -1;
 
        for (i = 0; i < ret; i++, gfn++, start++)
-               mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+               mmu_set_spte(vcpu, start, access, 0, NULL,
                             sp->role.level, gfn, page_to_pfn(pages[i]),
                             true, true);
 
@@ -2574,9 +2573,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
        for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
                if (iterator.level == level) {
-                       unsigned pte_access = ACC_ALL;
-
-                       mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
+                       mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
                                     write, &emulate, level, gfn, pfn,
                                     prefault, map_writable);
                        direct_pte_prefetch(vcpu, iterator.sptep);
index 34c5c99323f41e94e1c640619a3d57051424846f..105dd5bd550e5995d36b36df6c8739ccaedba033 100644 (file)
@@ -326,8 +326,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
         * we call mmu_set_spte() with host_writable = true because
         * pte_prefetch_gfn_to_pfn always gets a writable pfn.
         */
-       mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0,
-                    NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+       mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
+                    gfn, pfn, true, true);
 
        return true;
 }
@@ -470,9 +470,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        }
 
        clear_sp_write_flooding_count(it.sptep);
-       mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
-                    write_fault, &emulate, it.level,
-                    gw->gfn, pfn, prefault, map_writable);
+       mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
+                    it.level, gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
        return emulate;