KVM: MIPS/MMU: Drop kvm_get_new_mmu_context()
author    James Hogan <james.hogan@imgtec.com>
          Fri, 7 Oct 2016 21:39:41 +0000 (22:39 +0100)
committer James Hogan <james.hogan@imgtec.com>
          Fri, 3 Feb 2017 15:21:04 +0000 (15:21 +0000)
MIPS KVM uses its own variation of get_new_mmu_context(), which takes an
extra (unused) vcpu pointer but otherwise does exactly the same thing.

Switch to using get_new_mmu_context() directly and drop KVM's version of
it, as it no longer serves any purpose.

The nearby declarations of kvm_mips_alloc_new_mmu_context(),
kvm_mips_vcpu_load() and kvm_mips_vcpu_put() are also removed from
kvm_host.h, as no definitions or users exist.
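
For reference, the generic MIPS helper that both call sites now use looks
roughly as follows. This is a simplified sketch mirroring the identical KVM
copy removed from mmu.c below; the real definition lives in
asm/mmu_context.h and may differ in detail:

  /*
   * Sketch of get_new_mmu_context(): allocate a fresh ASID for @mm on @cpu,
   * flushing the TLB (and a virtually tagged icache) when the ASID space
   * wraps around.
   */
  static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
  {
          unsigned long asid = asid_cache(cpu);

          asid += cpu_asid_inc();
          if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
                  if (cpu_has_vtag_icache)
                          flush_icache_all();

                  local_flush_tlb_all();  /* start new asid cycle */

                  if (!asid)              /* fix version if needed */
                          asid = asid_first_version(cpu);
          }

          cpu_context(cpu, mm) = asid_cache(cpu) = asid;
  }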

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/emulate.c
arch/mips/kvm/mmu.c
arch/mips/kvm/trap_emul.c

index 174857f146b16b61d8af5fbe9c903e72c3770f87..1337abb18e2b710ba3d81fe0baa394c40e9e6528 100644 (file)
@@ -638,11 +638,6 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
                                  bool user);
-extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-                                   struct kvm_vcpu *vcpu);
-extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
-extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
 
 /* Emulation */
 u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
index cd11d787d9dc512df00993415df125ab9874f493..67ea39973b96cc551f51e8db0393be45ea03ef6f 100644 (file)
@@ -1198,8 +1198,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                         */
                                        preempt_disable();
                                        cpu = smp_processor_id();
-                                       kvm_get_new_mmu_context(kern_mm,
-                                                               cpu, vcpu);
+                                       get_new_mmu_context(kern_mm, cpu);
                                        for_each_possible_cpu(i)
                                                if (i != cpu)
                                                        cpu_context(i, kern_mm) = 0;
index cf832ea963d8e432c202530bae7c663c2232d87e..aab604e75d3b6552df9a6a397a017620eace3670 100644 (file)
@@ -443,25 +443,6 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        return 0;
 }
 
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-                            struct kvm_vcpu *vcpu)
-{
-       unsigned long asid = asid_cache(cpu);
-
-       asid += cpu_asid_inc();
-       if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
-               if (cpu_has_vtag_icache)
-                       flush_icache_all();
-
-               local_flush_tlb_all();      /* start new asid cycle */
-
-               if (!asid)      /* fix version if needed */
-                       asid = asid_first_version(cpu);
-       }
-
-       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
 /**
  * kvm_mips_migrate_count() - Migrate timer.
  * @vcpu:      Virtual CPU.
index ee8b5ad8c7c50b46d32d664f52fc9f08cdcfafa4..653850c05b33368dd52d46a157f1314683549939 100644 (file)
@@ -706,7 +706,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
-               kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
+               get_new_mmu_context(kern_mm, cpu);
 
                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
@@ -716,7 +716,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
-               kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+               get_new_mmu_context(user_mm, cpu);
 
                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
@@ -779,7 +779,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
-                       kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+                       get_new_mmu_context(user_mm, cpu);
                        for_each_possible_cpu(i)
                                if (i != cpu)
                                        cpu_context(i, user_mm) = 0;