KVM: arm64: Protect vLPI translation with vgic_irq::irq_lock
author     Oliver Upton <oliver.upton@linux.dev>
           Fri, 23 May 2025 19:47:19 +0000 (12:47 -0700)
committer  Marc Zyngier <maz@kernel.org>
           Fri, 30 May 2025 08:11:29 +0000 (09:11 +0100)
Though undocumented, KVM generally protects the translation of a vLPI
with the its_lock. While this makes perfectly good sense, as the ITS
itself contains the guest translation, an upcoming change will require
twiddling the vLPI mapping in an atomic context.

Switch to using the vIRQ's irq_lock to protect the translation. Use of
the its_lock in kvm_vgic_v4_unset_forwarding() is preserved for now as it
still needs to walk the ITS.

Tested-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250523194722.4066715-3-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
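
As a rough illustration of what the new locking rule buys (example_mark_unmapped() below is a hypothetical helper, not something this patch adds): once the translation is guarded by vgic_irq::irq_lock, the forwarding state of a vLPI can be changed while holding only the per-IRQ raw spinlock, which is legal in atomic context:

    static void example_mark_unmapped(struct vgic_irq *irq)
    {
            unsigned long flags;

            /* irq_lock is a raw spinlock, so this may run in atomic context */
            raw_spin_lock_irqsave(&irq->irq_lock, flags);
            irq->hw = false;        /* vLPI translation torn down */
            raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
    }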
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-v4.c

diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 569f9da9049fe81638122fac884f565860510a06..beca12dae7795c77ff9a1efe5cb12fcf0c13df47 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -306,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
                }
        }
 
-       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
        if (irq->hw)
-               return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+               ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-       return 0;
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+       return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-       int ret = 0;
-       unsigned long flags;
+       struct its_vlpi_map map;
+       int ret;
 
-       raw_spin_lock_irqsave(&irq->irq_lock, flags);
+       guard(raw_spinlock_irqsave)(&irq->irq_lock);
        irq->target_vcpu = vcpu;
-       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-       if (irq->hw) {
-               struct its_vlpi_map map;
-
-               ret = its_get_vlpi(irq->host_irq, &map);
-               if (ret)
-                       return ret;
+       if (!irq->hw)
+               return 0;
 
-               if (map.vpe)
-                       atomic_dec(&map.vpe->vlpi_count);
-               map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-               atomic_inc(&map.vpe->vlpi_count);
+       ret = its_get_vlpi(irq->host_irq, &map);
+       if (ret)
+               return ret;
 
-               ret = its_map_vlpi(irq->host_irq, &map);
-       }
+       if (map.vpe)
+               atomic_dec(&map.vpe->vlpi_count);
 
-       return ret;
+       map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+       atomic_inc(&map.vpe->vlpi_count);
+       return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -756,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+       struct vgic_irq *irq = ite->irq;
        list_del(&ite->ite_list);
 
        /* This put matches the get in vgic_add_lpi. */
-       if (ite->irq) {
-               if (ite->irq->hw)
-                       WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+       if (irq) {
+               scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+                       if (irq->hw)
+                               WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+                       irq->hw = false;
+               }
 
                vgic_put_irq(kvm, ite->irq);
        }
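
The vgic-its.c hunks above rely on the lock guards from <linux/cleanup.h>: guard(raw_spinlock_irqsave)(&irq->irq_lock) acquires the lock and drops it automatically when the enclosing scope ends, so every early return in update_affinity() unlocks correctly, while scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) { ... } limits the critical section to the braced block, as in its_free_ite(). A minimal sketch of the equivalence (the example_*() functions are illustrative only, not part of the patch):

    static int example_with_guard(struct vgic_irq *irq)
    {
            guard(raw_spinlock_irqsave)(&irq->irq_lock);

            if (!irq->hw)
                    return 0;       /* lock released automatically here... */

            return 1;               /* ...and here */
    }

    static int example_open_coded(struct vgic_irq *irq)
    {
            unsigned long flags;
            int ret;

            raw_spin_lock_irqsave(&irq->irq_lock, flags);
            ret = irq->hw ? 1 : 0;
            raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

            return ret;
    }

Both functions behave the same; the guard form simply cannot leak the lock on an early return.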
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 8b25e7650998169a285d6f28d5b12e6f5ec6390f..01a5de8e9e94e8b904ad9d7f010623bbbcd3a160 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -457,9 +457,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
                                 irq_entry->msi.data, &irq))
                return 0;
 
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
        /* Silently exit if the vLPI is already mapped */
        if (irq->hw)
-               return 0;
+               goto out_unlock_irq;
 
        /*
         * Emit the mapping request. If it fails, the ITS probably
@@ -479,30 +481,30 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
        ret = its_map_vlpi(virq, &map);
        if (ret)
-               return ret;
+               goto out_unlock_irq;
 
        irq->hw         = true;
        irq->host_irq   = virq;
        atomic_inc(&map.vpe->vlpi_count);
 
        /* Transfer pending state */
-       raw_spin_lock_irqsave(&irq->irq_lock, flags);
-       if (irq->pending_latch) {
-               ret = irq_set_irqchip_state(irq->host_irq,
-                                           IRQCHIP_STATE_PENDING,
-                                           irq->pending_latch);
-               WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+       if (!irq->pending_latch)
+               goto out_unlock_irq;
 
-               /*
-                * Clear pending_latch and communicate this state
-                * change via vgic_queue_irq_unlock.
-                */
-               irq->pending_latch = false;
-               vgic_queue_irq_unlock(kvm, irq, flags);
-       } else {
-               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-       }
+       ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+                                   irq->pending_latch);
+       WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+       /*
+        * Clear pending_latch and communicate this state
+        * change via vgic_queue_irq_unlock.
+        */
+       irq->pending_latch = false;
+       vgic_queue_irq_unlock(kvm, irq, flags);
+       return ret;
 
+out_unlock_irq:
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        return ret;
 }
 
@@ -511,7 +513,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 {
        struct vgic_its *its;
        struct vgic_irq *irq;
-       int ret;
+       unsigned long flags;
+       int ret = 0;
 
        if (!vgic_supports_direct_msis(kvm))
                return 0;
@@ -531,6 +534,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
        if (ret)
                goto out;
 
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
        WARN_ON(irq->hw && irq->host_irq != virq);
        if (irq->hw) {
                atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
@@ -538,6 +542,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
                ret = its_unmap_vlpi(virq);
        }
 
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
        mutex_unlock(&its->its_lock);
        return ret;
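
After this change, kvm_vgic_v4_unset_forwarding() nests the two locks as sketched below: the its_lock mutex stays outermost, since the ITS still has to be walked, and vgic_irq::irq_lock is taken inside it for the window where the translation is torn down. example_unset_nesting() is an illustrative reduction of the code above, not part of the patch, and omits the vlpi_count accounting:

    static int example_unset_nesting(struct vgic_its *its, struct vgic_irq *irq,
                                     int virq)
    {
            unsigned long flags;
            int ret = 0;

            mutex_lock(&its->its_lock);                     /* outer: may sleep */

            raw_spin_lock_irqsave(&irq->irq_lock, flags);   /* inner: atomic */
            if (irq->hw) {
                    irq->hw = false;
                    ret = its_unmap_vlpi(virq);
            }
            raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

            mutex_unlock(&its->its_lock);
            return ret;
    }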