Use kvm_trylock_all_vcpus() instead of a custom implementation when locking
all vCPUs of a VM, to avoid triggering a lockdep warning when the VM is
configured with more than MAX_LOCK_DEPTH vCPUs.
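The generic helper keeps the lockdep depth bounded by taking each vcpu->mutex
with a nest_lock annotation against kvm->lock, so lockdep reference-counts the
whole set as one nested acquisition instead of one held-lock entry per vCPU.
A minimal sketch of that approach follows; it is illustrative only, the helper
name and return value are hypothetical, and it assumes a
mutex_trylock_nest_lock()-style primitive such as the one introduced alongside
this series. The in-tree kvm_trylock_all_vcpus() may differ in detail:

	/* Sketch only -- not the in-tree implementation. */
	static int example_trylock_all_vcpus(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		unsigned long i, j;

		lockdep_assert_held(&kvm->lock);

		kvm_for_each_vcpu(i, vcpu, kvm) {
			/*
			 * The nest_lock form tells lockdep these mutexes are
			 * all nested under kvm->lock, so they do not consume
			 * one held-lock slot each and the depth stays below
			 * MAX_LOCK_DEPTH.
			 */
			if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
				goto out_unlock;
		}
		return 0;

	out_unlock:
		/* Drop the mutexes acquired before the failing vCPU. */
		kvm_for_each_vcpu(j, vcpu, kvm) {
			if (j == i)
				break;
			mutex_unlock(&vcpu->mutex);
		}
		return -EBUSY;
	}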
This fixes the following false lockdep warning:
[ 328.171264] BUG: MAX_LOCK_DEPTH too low!
[ 328.175227] turning off the locking correctness validator.
[ 328.180726] Please attach the output of /proc/lock_stat to the bug report
[ 328.187531] depth: 48 max: 48!
[ 328.190678] 48 locks held by qemu-kvm/11664:
[ 328.194957] #0: ffff800086de5ba0 (&kvm->lock){+.+.}-{3:3}, at: kvm_ioctl_create_device+0x174/0x5b0
[ 328.204048] #1: ffff0800e78800b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.212521] #2: ffff07ffeee51e98 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.220991] #3: ffff0800dc7d80b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.229463] #4: ffff07ffe0c980b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.237934] #5: ffff0800a3883c78 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
[ 328.246405] #6: ffff07fffbe480b8 (&vcpu->mutex){+.+.}-{3:3}, at: lock_all_vcpus+0x16c/0x2a0
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-6-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
unsigned int idx);
int __init populate_nv_trap_config(void);
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
/* MMIO helpers */
mutex_lock(&kvm->lock);
- if (lock_all_vcpus(kvm)) {
+ if (!kvm_trylock_all_vcpus(kvm)) {
set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
/*
kvm->arch.timer_data.voffset = offset->counter_offset;
kvm->arch.timer_data.poffset = offset->counter_offset;
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
} else {
ret = -EBUSY;
}
}
}
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
- struct kvm_vcpu *tmp_vcpu;
-
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&tmp_vcpu->mutex);
- }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
- lockdep_assert_held(&kvm->lock);
-
- unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
- struct kvm_vcpu *tmp_vcpu;
- unsigned long c;
-
- lockdep_assert_held(&kvm->lock);
-
- /*
- * Any time a vcpu is in an ioctl (including running), the
- * core KVM code tries to grab the vcpu->mutex.
- *
- * By grabbing the vcpu->mutex of all VCPUs we ensure that no
- * other VCPUs can fiddle with the state while we access it.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
- if (!mutex_trylock(&tmp_vcpu->mutex)) {
- unlock_vcpus(kvm, c - 1);
- return false;
- }
- }
-
- return true;
-}
-
static unsigned long nvhe_percpu_size(void)
{
return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
lockdep_assert_held(&kvm->lock);
ret = -EBUSY;
- if (!lock_all_vcpus(kvm))
+ if (kvm_trylock_all_vcpus(kvm))
return ret;
mutex_lock(&kvm->arch.config_lock);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
return ret;
}
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
}
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
mutex_lock(&kvm->lock);
- if (!lock_all_vcpus(kvm)) {
+ if (kvm_trylock_all_vcpus(kvm)) {
mutex_unlock(&kvm->lock);
return -EBUSY;
}
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->arch.config_lock);
- unlock_all_vcpus(kvm);
+ kvm_unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}
return -ENXIO;
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
}
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
mutex_lock(&dev->kvm->lock);
- if (!lock_all_vcpus(dev->kvm)) {
+ if (kvm_trylock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
out:
mutex_unlock(&dev->kvm->arch.config_lock);
- unlock_all_vcpus(dev->kvm);
+ kvm_unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {