KVM: arm64: Rename __tlb_switch_to_{guest,host}() in VHE
author Fuad Tabba <tabba@google.com>
Tue, 23 Apr 2024 15:05:18 +0000 (16:05 +0100)
committer Marc Zyngier <maz@kernel.org>
Wed, 1 May 2024 15:48:14 +0000 (16:48 +0100)
Rename __tlb_switch_to_{guest,host}() to
{enter,exit}_vmid_context() in VHE code to maintain symmetry
between the nVHE and VHE TLB invalidation code.

No functional change intended.

Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-11-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/vhe/tlb.c

index b32e2940df7dc83418fe39c4095998033326262e..4a653c4a277d30b458704aece37b9f5a456a981a 100644
@@ -17,8 +17,8 @@ struct tlb_inv_context {
        u64                     sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-                                 struct tlb_inv_context *cxt)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+                              struct tlb_inv_context *cxt)
 {
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        u64 val;
@@ -67,7 +67,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
        isb();
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
        /*
         * We're done with the TLB operation, let's restore the host's
@@ -97,7 +97,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
        dsb(ishst);
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       enter_vmid_context(mmu, &cxt);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -118,7 +118,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -129,7 +129,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        dsb(nshst);
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       enter_vmid_context(mmu, &cxt);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -150,7 +150,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
        dsb(ishst);
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       enter_vmid_context(mmu, &cxt);
 
        __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -178,7 +178,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -188,13 +188,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
        dsb(ishst);
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       enter_vmid_context(mmu, &cxt);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -202,14 +202,14 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       enter_vmid_context(mmu, &cxt);
 
        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host(&cxt);
+       exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
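
For reference, every caller touched above follows the same bracketed pattern around its TLB invalidation; the sketch below restates that pattern with the renamed helpers and adds explanatory comments (illustrative only, it is not part of the diff):

	/* VHE: a typical caller bracketing its TLBI with the renamed helpers. */
	void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
	{
		struct tlb_inv_context cxt;

		dsb(ishst);		/* make prior page-table updates visible */

		/* Switch to requested VMID */
		enter_vmid_context(mmu, &cxt);

		__tlbi(vmalls12e1is);	/* invalidate stage 1+2 entries for this VMID */
		dsb(ish);
		isb();

		exit_vmid_context(&cxt);	/* restore the host's translation context */
	}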