KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
Author:     Oliver Upton <oliver.upton@linux.dev>
AuthorDate: Mon, 27 Mar 2023 16:47:46 +0000
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Wed, 29 Mar 2023 13:08:31 +0000 (14:08 +0100)
There are various bits of VM-scoped data that can only be configured
before the first call to KVM_RUN, such as the hypercall bitmaps and
the PMU. As these fields are protected by the kvm->lock and accessed
while holding vcpu->mutex, this is yet another example of lock
inversion.
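
As an illustration (not part of this patch), the inversion boils down to two paths taking the same pair of locks in opposite orders. A standalone sketch, with pthread mutexes standing in for the kernel's struct mutex and simplified stand-in types and function names:

/*
 * Illustrative sketch only, not kernel code: the two orderings that
 * conflict when VM-scoped configuration lives under kvm->lock.
 */
#include <pthread.h>

struct kvm      { pthread_mutex_t lock; };
struct kvm_vcpu { pthread_mutex_t mutex; struct kvm *kvm; };

/* vCPU ioctl path: vcpu->mutex is held, then kvm->lock is taken (A -> B). */
void vcpu_ioctl_path(struct kvm_vcpu *vcpu)
{
	pthread_mutex_lock(&vcpu->mutex);
	pthread_mutex_lock(&vcpu->kvm->lock);
	/* ... read/write VM-scoped configuration ... */
	pthread_mutex_unlock(&vcpu->kvm->lock);
	pthread_mutex_unlock(&vcpu->mutex);
}

/* VM-scoped path: kvm->lock is held, then a vcpu->mutex is taken (B -> A). */
void vm_ioctl_path(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	pthread_mutex_lock(&kvm->lock);
	pthread_mutex_lock(&vcpu->mutex);
	/* ... operate on the vCPU ... */
	pthread_mutex_unlock(&vcpu->mutex);
	pthread_mutex_unlock(&kvm->lock);
}

Run concurrently, the two paths can each grab their first lock and block forever on the second; moving the VM-scoped configuration under a dedicated lock that nests inside both sidesteps the conflicting order.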

Change out the kvm->lock for kvm->arch.config_lock in all of these
instances. Opportunistically simplify the locking mechanics of the
PMU configuration by holding the config_lock for the entirety of
kvm_arm_pmu_v3_set_attr().
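
Pieced together from the guest.c and pmu-emul.c hunks below, the resulting shape is (simplified, other attr groups and error handling omitted):

	/* Caller, kvm_arm_vcpu_arch_set_attr(): hold the config_lock across
	 * the whole PMU attribute update. */
	mutex_lock(&vcpu->kvm->arch.config_lock);
	ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/* Callees, kvm_arm_pmu_v3_set_attr() and kvm_arm_pmu_v3_set_pmu():
	 * no longer take kvm->lock themselves, only assert the new lock. */
	lockdep_assert_held(&kvm->arch.config_lock);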

Note that this also addresses a couple of bugs. There is an unguarded
read of the PMU version in KVM_ARM_VCPU_PMU_V3_FILTER which could race
with KVM_ARM_VCPU_PMU_V3_SET_PMU. Additionally, until now writes to the
per-vCPU vPMU IRQ were not serialized VM-wide, meaning concurrent calls
to KVM_ARM_VCPU_PMU_V3_IRQ could lead to a false positive in
pmu_irq_is_valid().
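
For the pmu_irq_is_valid() case, the VM-wide consistency walk looks roughly like the sketch below (simplified from the upstream helper; treat the exact shape as an assumption). Because each caller previously held only its own vcpu->mutex, two concurrent KVM_ARM_VCPU_PMU_V3_IRQ calls could both pass the walk against the other's not-yet-visible write and then both commit, e.g. leaving two vCPUs sharing the same SPI.

static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* Skip vCPUs whose PMU IRQ has not been configured yet */
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			/* PPIs: every vCPU must use the same interrupt */
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			/* SPIs: no two vCPUs may share an interrupt */
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}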

Cc: stable@vger.kernel.org
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230327164747.2466958-4-oliver.upton@linux.dev
arch/arm64/kvm/arm.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/pmu-emul.c

index 1620ec3d95ef88e3f517bbfd5fe55bd8d9222d2a..fd8d355aca15eb9f07e2f856d2040e3cd90a0556 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -624,9 +624,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
        if (kvm_vm_is_protected(kvm))
                kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 
        return ret;
 }
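
Not shown in this hunk: elsewhere in the series the new kvm->arch.config_lock is taught to nest inside kvm->lock, so lockdep can flag ordering violations. A minimal sketch of how that can be done at VM creation time; the placement and exact form are assumptions, not part of this diff:

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Acquire the locks once in the intended order so lockdep learns
	 * that config_lock nests inside kvm->lock. */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif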
index 07444fa2288887ec6f8455045fada76d02e77862..481c79cf22cd24e7f1a450ea8e9d471f2b9d0a04 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -957,7 +957,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 
        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
+               mutex_lock(&vcpu->kvm->arch.config_lock);
                ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+               mutex_unlock(&vcpu->kvm->arch.config_lock);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
index 5da884e11337a6d420e3dc71456b469057300d1c..fbdbf4257f764ab7e379a71422eab1fed439bf99 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
        if (val & ~fw_reg_features)
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
 
        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
            val != *fw_reg_bmap) {
@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
 
        WRITE_ONCE(*fw_reg_bmap, val);
 out:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
        return ret;
 }
 
index 24908400e190616f317b9e6725b4605c12d1c8a4..2401684168388bc58b23f5a5a7a5d18cdfae91cd 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -874,7 +874,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
        struct arm_pmu *arm_pmu;
        int ret = -ENXIO;
 
-       mutex_lock(&kvm->lock);
+       lockdep_assert_held(&kvm->arch.config_lock);
        mutex_lock(&arm_pmus_lock);
 
        list_for_each_entry(entry, &arm_pmus, entry) {
@@ -894,7 +894,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
        }
 
        mutex_unlock(&arm_pmus_lock);
-       mutex_unlock(&kvm->lock);
        return ret;
 }
 
@@ -902,22 +901,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
        struct kvm *kvm = vcpu->kvm;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;
 
        if (vcpu->arch.pmu.created)
                return -EBUSY;
 
-       mutex_lock(&kvm->lock);
        if (!kvm->arch.arm_pmu) {
                /* No PMU set, get the default one */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-               if (!kvm->arch.arm_pmu) {
-                       mutex_unlock(&kvm->lock);
+               if (!kvm->arch.arm_pmu)
                        return -ENODEV;
-               }
        }
-       mutex_unlock(&kvm->lock);
 
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
@@ -961,19 +958,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;
 
-               mutex_lock(&kvm->lock);
-
-               if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
-                       mutex_unlock(&kvm->lock);
+               if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
                        return -EBUSY;
-               }
 
                if (!kvm->arch.pmu_filter) {
                        kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
-                       if (!kvm->arch.pmu_filter) {
-                               mutex_unlock(&kvm->lock);
+                       if (!kvm->arch.pmu_filter)
                                return -ENOMEM;
-                       }
 
                        /*
                         * The default depends on the first applied filter.
@@ -992,8 +983,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                else
                        bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
 
-               mutex_unlock(&kvm->lock);
-
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_SET_PMU: {