KVM: arm64: use kvm_trylock_all_vcpus when locking all vCPUs
author: Maxim Levitsky <mlevitsk@redhat.com>
Mon, 12 May 2025 18:04:06 +0000 (14:04 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Tue, 27 May 2025 16:16:41 +0000 (12:16 -0400)
Use kvm_trylock_all_vcpus instead of a custom implementation when locking
all vCPUs of a VM, to avoid triggering a lockdep warning, in the case in
which the VM is configured to have more than MAX_LOCK_DEPTH vCPUs.

This fixes the following false lockdep warning:

[  328.171264] BUG: MAX_LOCK_DEPTH too low!
[  328.175227] turning off the locking correctness validator.
[  328.180726] Please attach the output of /proc/lock_stat to the bug report
[  328.187531] depth: 48  max: 48!
[  328.190678] 48 locks held by qemu-kvm/11664:
[  328.194957]  #0: ffff800086de5ba0 (&kvm->lock){+.+.}-{3:3}, at: kvm_ioctl_create_device+0x174/0x5b0
[  328.204048]  #1: ffff0800e78800b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[  328.212521]  #2: ffff07ffeee51e98 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[  328.220991]  #3: ffff0800dc7d80b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[  328.229463]  #4: ffff07ffe0c980b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[  328.237934]  #5: ffff0800a3883c78 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[  328.246405]  #6: ffff07fffbe480b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-6-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/vgic/vgic-init.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-kvm-device.c

index d941abc6b5eef6143454f194b677729a3ece7be7..6ce2c51734820dbdb4110ef13b1c48d36d89abb6 100644 (file)
@@ -1320,9 +1320,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
                                  unsigned int idx);
 int __init populate_nv_trap_config(void);
 
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
 void kvm_calculate_traps(struct kvm_vcpu *vcpu);
 
 /* MMIO helpers */
index 5133dcbfe9f761b3de75d941fd8041f27b44d47f..fdbc8beec930bd3a1ad033a06102ff54a97fb89e 100644 (file)
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
 
        mutex_lock(&kvm->lock);
 
-       if (lock_all_vcpus(kvm)) {
+       if (!kvm_trylock_all_vcpus(kvm)) {
                set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
 
                /*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
                kvm->arch.timer_data.voffset = offset->counter_offset;
                kvm->arch.timer_data.poffset = offset->counter_offset;
 
-               unlock_all_vcpus(kvm);
+               kvm_unlock_all_vcpus(kvm);
        } else {
                ret = -EBUSY;
        }
index 36cfcffb40d89c8a66f9c4cb10211cf66ebbdad8..248e257e988b6ce65021def0a3dcde62e9e25387 100644 (file)
@@ -1924,49 +1924,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
        }
 }
 
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-       struct kvm_vcpu *tmp_vcpu;
-
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&tmp_vcpu->mutex);
-       }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-       lockdep_assert_held(&kvm->lock);
-
-       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-       struct kvm_vcpu *tmp_vcpu;
-       unsigned long c;
-
-       lockdep_assert_held(&kvm->lock);
-
-       /*
-        * Any time a vcpu is in an ioctl (including running), the
-        * core KVM code tries to grab the vcpu->mutex.
-        *
-        * By grabbing the vcpu->mutex of all VCPUs we ensure that no
-        * other VCPUs can fiddle with the state while we access it.
-        */
-       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-               if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                       unlock_vcpus(kvm, c - 1);
-                       return false;
-               }
-       }
-
-       return true;
-}
-
 static unsigned long nvhe_percpu_size(void)
 {
        return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
index 1f33e71c2a731b7ab496c52828c1f52ba90a15f7..6a426d403a6b385667358f939b671f332c055d5b 100644 (file)
@@ -88,7 +88,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        lockdep_assert_held(&kvm->lock);
 
        ret = -EBUSY;
-       if (!lock_all_vcpus(kvm))
+       if (kvm_trylock_all_vcpus(kvm))
                return ret;
 
        mutex_lock(&kvm->arch.config_lock);
@@ -142,7 +142,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 
 out_unlock:
        mutex_unlock(&kvm->arch.config_lock);
-       unlock_all_vcpus(kvm);
+       kvm_unlock_all_vcpus(kvm);
        return ret;
 }
 
index 569f9da9049fe81638122fac884f565860510a06..2eb3e023f66a830d23f1844a092c3eb37fcc9631 100644 (file)
@@ -1971,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -2006,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
        }
 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
        return ret;
 }
@@ -2676,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
        mutex_lock(&kvm->lock);
 
-       if (!lock_all_vcpus(kvm)) {
+       if (kvm_trylock_all_vcpus(kvm)) {
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
@@ -2698,7 +2698,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
        mutex_unlock(&its->its_lock);
        mutex_unlock(&kvm->arch.config_lock);
-       unlock_all_vcpus(kvm);
+       kvm_unlock_all_vcpus(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
 }
index 359094f68c23ebd726c955fb16ecb04679309385..f9ae790163fb0d0151433d968594ba9a7ed4fb9b 100644 (file)
@@ -268,7 +268,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                                return -ENXIO;
                        mutex_lock(&dev->kvm->lock);
 
-                       if (!lock_all_vcpus(dev->kvm)) {
+                       if (kvm_trylock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
@@ -276,7 +276,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                        mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_v3_save_pending_tables(dev->kvm);
                        mutex_unlock(&dev->kvm->arch.config_lock);
-                       unlock_all_vcpus(dev->kvm);
+                       kvm_unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
@@ -390,7 +390,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -415,7 +415,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
 
        if (!ret && !is_write)
@@ -554,7 +554,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -611,7 +611,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
 
        if (!ret && uaccess && !is_write) {