x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation
author Maxim Levitsky <mlevitsk@redhat.com>
Mon, 12 May 2025 18:04:05 +0000 (14:04 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 27 May 2025 16:16:41 +0000 (12:16 -0400)
Use kvm_lock_all_vcpus instead of sev's own implementation.

Because kvm_lock_all_vcpus uses the _nest_lock feature of lockdep, which
ignores subclasses, there is no longer a need to use separate subclasses
for source and target VMs.

No functional change intended.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
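
For reference, a minimal sketch of what the generic helper could look like; the real implementation lives in virt/kvm/kvm_main.c and is not part of this diff, and the use of the mutex_lock_killable_nest_lock() primitive with kvm->lock as the nest lock is assumed from the rest of this series rather than shown here:

int kvm_lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	/* Every vcpu->mutex nests under kvm->lock, which the caller holds. */
	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * The _nest_lock annotation tells lockdep that acquiring all
		 * vcpu->mutex instances is serialized by kvm->lock, so no
		 * per-vCPU subclasses (and no source/target roles) are needed
		 * to avoid false-positive deadlock reports.
		 */
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	/* Drop only the mutexes that were successfully acquired. */
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}

kvm_unlock_all_vcpus() is then a plain loop calling mutex_unlock() on every vcpu->mutex, with none of the mutex_acquire()/mutex_release() bookkeeping that the SEV-specific helpers removed below had to perform.
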
arch/x86/kvm/svm/sev.c

index a7a7dc5073363bafc436f304cb75f186fb564078..710ca9810a171c13284fe1b82c8a47e1daba6b6b 100644
@@ -1882,70 +1882,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
        atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-/* vCPU mutex subclasses.  */
-enum sev_migration_role {
-       SEV_MIGRATION_SOURCE = 0,
-       SEV_MIGRATION_TARGET,
-       SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-                                       enum sev_migration_role role)
-{
-       struct kvm_vcpu *vcpu;
-       unsigned long i, j;
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (mutex_lock_killable_nested(&vcpu->mutex, role))
-                       goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-               if (!i)
-                       /*
-                        * Reset the role to one that avoids colliding with
-                        * the role used for the first vcpu mutex.
-                        */
-                       role = SEV_NR_MIGRATION_ROLES;
-               else
-                       mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-       }
-
-       return 0;
-
-out_unlock:
-
-       kvm_for_each_vcpu(j, vcpu, kvm) {
-               if (i == j)
-                       break;
-
-#ifdef CONFIG_PROVE_LOCKING
-               if (j)
-                       mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-               mutex_unlock(&vcpu->mutex);
-       }
-       return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-       struct kvm_vcpu *vcpu;
-       unsigned long i;
-       bool first = true;
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (first)
-                       first = false;
-               else
-                       mutex_acquire(&vcpu->mutex.dep_map,
-                                     SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-               mutex_unlock(&vcpu->mutex);
-       }
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
        struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2083,10 +2019,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                charged = true;
        }
 
-       ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+       ret = kvm_lock_all_vcpus(kvm);
        if (ret)
                goto out_dst_cgroup;
-       ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+       ret = kvm_lock_all_vcpus(source_kvm);
        if (ret)
                goto out_dst_vcpu;
 
@@ -2100,9 +2036,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
        ret = 0;
 
 out_source_vcpu:
-       sev_unlock_vcpus_for_migration(source_kvm);
+       kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-       sev_unlock_vcpus_for_migration(kvm);
+       kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
        /* Operates on the source on success, on the destination on failure.  */
        if (charged)